vk/formats: Rename anv_format::channels -> num_channels
[mesa.git] / src / vulkan / private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #ifdef HAVE_VALGRIND
34 #include <valgrind.h>
35 #include <memcheck.h>
36 #define VG(x) x
37 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
38 #else
39 #define VG(x)
40 #endif
41
42 #include "brw_device_info.h"
43 #include "util/macros.h"
44
45 #define VK_PROTOTYPES
46 #include <vulkan/vulkan.h>
47 #include <vulkan/vulkan_intel.h>
48 #include <vulkan/vk_wsi_lunarg.h>
49
50 #include "entrypoints.h"
51
52 #include "brw_context.h"
53
54 #ifdef __cplusplus
55 extern "C" {
56 #endif
57
/* Shorthands for the GCC/Clang attributes used on declarations below:
 * anv_noreturn marks functions that never return, anv_printflike enables
 * printf-style format checking (a = format arg index, b = first vararg). */
#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

/* NOTE(review): classic MAX macro — each argument is evaluated twice, so do
 * not pass expressions with side effects. */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
62
/**
 * Round v up to the next multiple of a.
 *
 * a must be a power of two; the mask trick below is only valid then.
 */
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
68
/**
 * Round v up to the next multiple of a (signed variant of ALIGN_U32).
 *
 * a must be a positive power of two.
 */
static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   int32_t mask = a - 1;
   return (v + mask) & ~mask;
}
74
/** Returns true when n is a multiple of a.  Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   /* a & -a isolates the lowest set bit; it equals a iff a is a power of 2. */
   assert(a == (a & -a));
   uintmax_t mask = a - 1;
   return (n & mask) == 0;
}
82
/**
 * Size of mip level `levels` of a dimension n, clamped to at least 1.
 *
 * Returns 0 only for n == 0 (a dimensionless image stays dimensionless).
 */
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (n == 0)
      return 0;

   uint32_t m = n >> levels;
   return m > 0 ? m : 1;
}
91
/* Iterate b over the set bit positions of dword, lowest first.  The value is
 * copied into __dword once and consumed bit by bit, so the dword expression
 * is evaluated only a single time. */
#define for_each_bit(b, dword) \
   for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1
99
/* Mirrors the sType/pNext header every extensible Vulkan input struct starts
 * with, so a chain of structs can be walked without knowing concrete types. */
struct anv_common {
   VkStructureType sType;
   const void* pNext;
};
104
105 /* Whenever we generate an error, pass it through this function. Useful for
106 * debugging, where we can break on it. Only call at error site, not when
107 * propagating errors. Might be useful to plug in a stack trace here.
108 */
109
110 static inline VkResult
111 vk_error(VkResult error)
112 {
113 #ifdef DEBUG
114 fprintf(stderr, "vk_error: %x\n", error);
115 #endif
116
117 return error;
118 }
119
/* Logging helpers (implemented elsewhere); anv_printflike lets the compiler
 * type-check the format strings at the call sites. */
void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
124
/**
 * Print a FINISHME message, including its source location.
 *
 * Expands to a single expression-statement with no trailing semicolon, so
 * the caller supplies its own `;` and the macro is safe inside an un-braced
 * if/else.  (The previous definition baked a `;` into the expansion, which
 * produced a second empty statement at every call site.)
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
130
/* A non-fatal assert.  Useful for debugging.  Unlike assert(3) it only logs
 * the failure (file:line and the stringified condition) and keeps running;
 * it compiles away entirely outside DEBUG builds. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

/* Fatal error reporting; both variants terminate and never return. */
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;
143
/* Mark an unimplemented entry point: log a FINISHME and return from the
 * calling function.  stub_return(v) is for functions returning a value,
 * stub() for void functions.  (stub previously declared a parameter `v`
 * that was never used and that callers never passed — removed.) */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
155
/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_vector {
   uint32_t head;          /* free-running byte index of the next push */
   uint32_t tail;          /* free-running byte index of the next pop */
   uint32_t element_size;  /* size in bytes of one element */
   uint32_t size;          /* capacity in bytes; presumably a power of two
                            * (foreach masks with size - 1) — TODO confirm */
   void *data;             /* backing storage, owned by the vector */
};

/* Returns non-zero on success; add/remove return a pointer to the element
 * slot, or NULL when growth fails / the vector is empty. */
int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);
175
176 static inline int
177 anv_vector_length(struct anv_vector *queue)
178 {
179 return (queue->head - queue->tail) / queue->element_size;
180 }
181
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   /* Frees only the backing storage; the struct itself is caller-owned. */
   free(queue->data);
}

/* Iterate elem over the queued elements from tail to head.  Relies on
 * (queue)->size being a power of two for the index masking.  The leading
 * static_assert rejects anything that is not a struct anv_vector *. */
#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
193
/* A GEM buffer object plus the driver-side bookkeeping for it. */
struct anv_bo {
   int gem_handle;    /* kernel GEM handle */
   uint32_t index;    /* slot in the execbuf object list */
   uint64_t offset;   /* presumed GPU offset from the last execbuf — TODO confirm */
   uint64_t size;

   /* This field is here for the benefit of the aub dumper. It can (and for
    * userptr bos it must) be set to the cpu map of the buffer. Destroying
    * the bo won't clean up the mmap, it's still the responsibility of the bo
    * user to do that. */
   void *map;
};

/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

/* Empty sentinel: offset 1 with count 0.  NOTE(review): presumably offset 1
 * is used because it can never be a valid (aligned) block offset — confirm. */
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
222
/* A pool carved out of one mmapped BO, handing out fixed-size blocks. */
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;
   void *map;      /* CPU mapping of bo */
   int fd;
   uint32_t size;  /* current size of the mapping, in bytes */

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;

   uint32_t next_block;              /* offset of the next never-used block */
   union anv_free_list free_list;    /* recycled blocks */
};

/* next/end packed into one uint64_t so both can be updated atomically. */
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

/* A sub-allocation out of a state pool: offset within the pool's BO plus
 * the CPU pointer for filling it in. */
struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};

/* One size bucket of a state pool. */
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

/* Bucketed sizes range from 2^6 to 2^10 bytes. */
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

/* Power-of-two-bucketed allocator for small GPU state, on top of a block
 * pool. */
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

/* Bump allocator over blocks from a block pool; freed all at once via
 * anv_state_stream_finish. */
struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;
   uint32_t current_block;
   uint32_t end;
};

void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
297
/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   uint32_t bo_size;   /* size of every BO handed out by this pool */

   void *free_list;    /* intrusive list of returned BOs */
};

void anv_bo_pool_init(struct anv_bo_pool *pool,
                      struct anv_device *device, uint32_t block_size);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

struct anv_object;
struct anv_device;

typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

/* Common header for driver objects that need a polymorphic destructor. */
struct anv_object {
   anv_object_destructor_cb destructor;
};
326
/* Enumerated GPU; owned by the instance. */
struct anv_physical_device {
   struct anv_instance *instance;
   uint32_t chipset_id;             /* PCI device id */
   bool no_hw;                      /* running without real hardware */
   const char *path;                /* DRM device node path */
   const char *name;
   const struct brw_device_info *info;
};

/* Top-level API object; holds the app-supplied allocator callbacks.  Only a
 * single physical device is supported (physicalDeviceCount <= 1). */
struct anv_instance {
   void *pAllocUserData;
   PFN_vkAllocFunction pfnAlloc;
   PFN_vkFreeFunction pfnFree;
   uint32_t apiVersion;
   uint32_t physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};

/* Pre-built pipelines and dynamic state for the meta operations (clears and
 * blits) implemented inside the driver. */
struct anv_meta_state {
   struct {
      VkPipeline pipeline;
   } clear;

   struct {
      VkPipeline pipeline;
      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } blit;

   struct {
      VkDynamicRsState rs_state;
      VkDynamicCbState cb_state;
      VkDynamicDsState ds_state;
   } shared;
};
362
/* Submission queue; tracks batch completion via serial numbers. */
struct anv_queue {
   struct anv_device *device;

   struct anv_state_pool *pool;

   /**
    * Serial number of the most recently completed batch executed on the
    * engine.
    */
   struct anv_state completed_serial;

   /**
    * The next batch submitted to the engine will be assigned this serial
    * number.
    */
   uint32_t next_serial;

   uint32_t last_collected_serial;
};

/* The logical device: DRM context, allocators, and shared state pools. */
struct anv_device {
   struct anv_instance *instance;
   uint32_t chipset_id;
   struct brw_device_info info;
   int context_id;            /* kernel GPU context */
   int fd;                    /* DRM device fd */
   bool no_hw;
   bool dump_aub;             /* dump submissions through the aub writer */

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_meta_state meta_state;

   /* Pre-baked SAMPLER_BORDER_COLOR_STATE entries. */
   struct anv_state float_border_colors;
   struct anv_state uint32_border_colors;

   struct anv_queue queue;

   struct anv_block_pool scratch_block_pool;

   struct anv_compiler *compiler;
   struct anv_aub_writer *aub_writer;
   pthread_mutex_t mutex;     /* guards device-wide state — TODO confirm scope */
};
414
415 void *
416 anv_device_alloc(struct anv_device * device,
417 size_t size,
418 size_t alignment,
419 VkSystemAllocType allocType);
420
421 void
422 anv_device_free(struct anv_device * device,
423 void * mem);
424
425 void* anv_gem_mmap(struct anv_device *device,
426 uint32_t gem_handle, uint64_t offset, uint64_t size);
427 void anv_gem_munmap(void *p, uint64_t size);
428 uint32_t anv_gem_create(struct anv_device *device, size_t size);
429 void anv_gem_close(struct anv_device *device, int gem_handle);
430 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
431 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
432 int anv_gem_execbuffer(struct anv_device *device,
433 struct drm_i915_gem_execbuffer2 *execbuf);
434 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
435 uint32_t stride, uint32_t tiling);
436 int anv_gem_create_context(struct anv_device *device);
437 int anv_gem_destroy_context(struct anv_device *device, int context);
438 int anv_gem_get_param(int fd, uint32_t param);
439 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
440 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
441 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
442 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
443
444 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
445
/* Growable array of kernel relocation entries plus the BOs they target;
 * relocs[i] pairs with reloc_bos[i]. */
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;   /* allocated capacity of both arrays */
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             struct anv_device *device);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           struct anv_device *device);

/* One BO's worth of batch commands; chains backwards via prev_batch_bo. */
struct anv_batch_bo {
   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   /* These offsets reference the per-batch reloc list */
   size_t first_reloc;
   size_t num_relocs;

   struct anv_batch_bo *prev_batch_bo;
};
470
/* A command-emission cursor: commands are packed at `next` within the
 * [start, end) window. */
struct anv_batch {
   struct anv_device *device;

   void *start;
   void *end;
   void *next;

   struct anv_reloc_list relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);

/* A location inside a BO, used as the relocation target type by the
 * generated pack functions below. */
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

/* Type hooks consumed by the genX_pack.h headers included below. */
#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
499
500 static inline uint64_t
501 __gen_combine_address(struct anv_batch *batch, void *location,
502 const struct anv_address address, uint32_t delta)
503 {
504 if (address.bo == NULL) {
505 return delta;
506 } else {
507 assert(batch->start <= location && location < batch->end);
508
509 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
510 }
511 }
512
#include "gen7_pack.h"
#include "gen75_pack.h"
/* gen75_pack.h also defines GEN8_3DSTATE_MULTISAMPLE; drop it so the gen8
 * header's definition takes effect.  NOTE(review): confirm the gen8 layout
 * is the one intended at all users of this symbol. */
#undef GEN8_3DSTATE_MULTISAMPLE
#include "gen8_pack.h"

/* Emit one fixed-length command: build a template from the designated
 * initializers in __VA_ARGS__, reserve its dword length, and pack. */
#define anv_batch_emit(batch, cmd, ...) do { \
      struct cmd __template = { \
         cmd ## _header, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
      cmd ## _pack(batch, __dst, &__template); \
   } while (0)

/* Like anv_batch_emit but for variable-length commands: emits n dwords and
 * evaluates to a pointer to them so the caller can fill in the tail. */
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      struct cmd __template = { \
         cmd ## _header, \
         .DwordLength = n - cmd ## _length_bias, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      cmd ## _pack(batch, __dst, &__template); \
      __dst; \
   })

/* OR two pre-packed dword arrays of equal length into the batch. */
#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
   } while (0)

/* Default Gen8 memory object control state (write-back cacheable, LLC/eLLC
 * selection deferred to PAT) used when filling surface/buffer state. */
#define GEN8_MOCS { \
      .MemoryTypeLLCeLLCCacheabilityControl = WB, \
      .TargetCache = L3DefertoPATforLLCeLLCselection, \
      .AgeforQUADLRU = 0 \
   }
554
/* A VkDeviceMemory allocation: one BO plus its CPU mapping when mapped. */
struct anv_device_memory {
   struct anv_bo bo;
   VkDeviceSize map_size;
   void *map;
};

/* Dynamic viewport state: pre-baked hardware state allocations. */
struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};

/* Dynamic raster state: partially-packed 3DSTATE_SF / 3DSTATE_RASTER dwords
 * to be merged with the pipeline's at draw time. */
struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

/* Dynamic depth/stencil state dwords. */
struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

/* Dynamic color-blend state dwords. */
struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];

};

/* Maps one descriptor to its binding-table index; dynamic_slot is the
 * dynamic-buffer slot or negative when not dynamic — TODO confirm. */
struct anv_descriptor_slot {
   int8_t dynamic_slot;
   uint8_t index;
};

struct anv_descriptor_set_layout {
   struct {
      uint32_t surface_count;
      struct anv_descriptor_slot *surface_start;
      uint32_t sampler_count;
      struct anv_descriptor_slot *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;                /* total entries in entries[] */
   uint32_t num_dynamic_buffers;
   uint32_t shader_stages;        /* bitmask of stages using this layout */
   struct anv_descriptor_slot entries[0];  /* trailing per-binding slots */
};

/* One bound descriptor: a sampler, a view, or both (combined image/sampler). */
struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];
};
610
/* Driver limits: vertex buffers, bound descriptor sets, render targets. */
#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8

/* Flattened view of all descriptor set layouts: per-set start offsets into
 * the per-stage binding tables, plus per-stage totals. */
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};

struct anv_buffer {
   struct anv_device *device;
   VkDeviceSize size;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;
};

/* Command-buffer dirty bits for lazily re-emitting state.
 * NOTE(review): bit 1 is unused here — presumably reserved or a leftover;
 * confirm before reusing it. */
#define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
#define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)
#define ANV_CMD_BUFFER_VP_DIRTY (1 << 5)

struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};

struct anv_descriptor_set_binding {
   struct anv_descriptor_set *set;
   uint32_t dynamic_offsets[128];
};
654
/* A VkCmdBuffer: the execbuf being assembled plus all state bound while
 * recording. */
struct anv_cmd_buffer {
   struct anv_object base;
   struct anv_device *device;

   /* Kernel submission: the execbuf and its growable object list. */
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 *exec2_objects;
   struct anv_bo **exec2_bos;
   uint32_t exec2_array_length;
   bool need_reloc;
   uint32_t serial;

   uint32_t bo_count;
   struct anv_batch batch;
   struct anv_batch_bo *last_batch_bo;
   struct anv_batch_bo *surface_batch_bo;
   uint32_t surface_next;
   struct anv_reloc_list surface_relocs;
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t current_pipeline;
   uint32_t vb_dirty;             /* bitmask of dirty vertex buffers */
   uint32_t dirty;                /* ANV_CMD_BUFFER_*_DIRTY bits */
   uint32_t compute_dirty;
   uint32_t descriptors_dirty;    /* bitmask of stages needing new tables */
   uint32_t scratch_size;
   struct anv_pipeline *pipeline;
   struct anv_pipeline *compute_pipeline;
   struct anv_framebuffer *framebuffer;
   struct anv_dynamic_rs_state *rs_state;
   struct anv_dynamic_ds_state *ds_state;
   struct anv_dynamic_vp_state *vp_state;
   struct anv_dynamic_cb_state *cb_state;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set_binding descriptors[MAX_SETS];
};

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
void anv_aub_writer_destroy(struct anv_aub_writer *writer);

/* A fence is a tiny pre-built batch; `ready` is set once it has signaled. */
struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

/* A shader module: size-prefixed blob of trailing data. */
struct anv_shader {
   uint32_t size;
   char data[0];
};
708
/* A compiled graphics or compute pipeline: per-stage programs, prog_data,
 * and pre-packed hardware state merged in at draw time. */
struct anv_pipeline {
   struct anv_object base;
   struct anv_device *device;
   struct anv_batch batch;            /* pre-recorded state commands */
   uint32_t batch_data[256];          /* backing storage for `batch` */
   struct anv_shader *shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout *layout;
   bool use_repclear;

   struct brw_vs_prog_data vs_prog_data;
   struct brw_wm_prog_data wm_prog_data;
   struct brw_gs_prog_data gs_prog_data;
   struct brw_cs_prog_data cs_prog_data;
   struct brw_stage_prog_data *prog_data[VK_NUM_SHADER_STAGE];
   uint32_t scratch_start[VK_NUM_SHADER_STAGE];
   uint32_t total_scratch;
   /* URB partitioning between the VS and GS stages. */
   struct {
      uint32_t vs_start;
      uint32_t vs_size;
      uint32_t nr_vs_entries;
      uint32_t gs_start;
      uint32_t gs_size;
      uint32_t nr_gs_entries;
   } urb;

   uint32_t active_stages;            /* bitmask of enabled shader stages */
   struct anv_state_stream program_stream;
   struct anv_state blend_state;
   /* Kernel start offsets in the program stream for each compiled variant. */
   uint32_t vs_simd8;
   uint32_t ps_simd8;
   uint32_t ps_simd16;
   uint32_t gs_vec4;
   uint32_t gs_vertex_count;
   uint32_t cs_simd;

   uint32_t vb_used;                  /* bitmask of vertex buffers used */
   uint32_t binding_stride[MAX_VBS];

   /* Partially-packed state, merged with the dynamic state's dwords. */
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];

   uint32_t cs_thread_width_max;
   uint32_t cs_right_mask;
};

/* Extra, non-API knobs used by the meta paths when building pipelines. */
struct anv_pipeline_create_info {
   bool use_repclear;
   bool disable_viewport;
   bool disable_scissor;
   bool disable_vs;
   bool use_rectlist;
};

VkResult
anv_pipeline_create(VkDevice device,
                    const VkGraphicsPipelineCreateInfo *pCreateInfo,
                    const struct anv_pipeline_create_info *extra,
                    VkPipeline *pPipeline);

struct anv_compiler *anv_compiler_create(struct anv_device *device);
void anv_compiler_destroy(struct anv_compiler *compiler);
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
void anv_compiler_free(struct anv_pipeline *pipeline);

/* Driver-side description of a VkFormat: the hardware surface format,
 * bytes per pixel, and channel count. */
struct anv_format {
   const char *name;
   uint16_t format;        /* hardware surface format enum value */
   uint8_t cpp;            /* bytes per pixel */
   uint8_t num_channels;
   bool has_stencil;
};

const struct anv_format *
anv_format_for_vk_format(VkFormat format);
784
/* A VkImage: layout parameters plus the BO it is bound to. */
struct anv_image {
   VkImageType type;
   VkExtent3D extent;
   VkFormat format;
   uint32_t tile_mode;
   VkDeviceSize size;
   uint32_t alignment;
   uint32_t stride;          /* row pitch in bytes */

   /* Separate stencil surface location within the BO. */
   uint32_t stencil_offset;
   uint32_t stencil_stride;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   struct anv_swap_chain *swap_chain;

   /**
    * \name Alignment of miptree images, in units of pixels.
    *
    * These fields contain the actual alignment values, not the values the
    * hardware expects. For example, if h_align is 4, then program the hardware
    * with HALIGN_4.
    *
    * \see RENDER_SURFACE_STATE.SurfaceHorizontalAlignment
    * \see RENDER_SURFACE_STATE.SurfaceVerticalAlignment
    * \{
    */
   uint8_t h_align;
   uint8_t v_align;
   /** \} */

   /** RENDER_SURFACE_STATE.SurfaceType */
   uint8_t surf_type;
};

/* A view onto a BO range with baked RENDER_SURFACE_STATE; backs image,
 * color-attachment, and buffer views alike. */
struct anv_surface_view {
   struct anv_object base;

   struct anv_state surface_state;
   struct anv_bo *bo;
   uint32_t offset;
   uint32_t range;
   VkExtent3D extent;
   VkFormat format;
};

/* Extra, non-API parameters for internal image creation. */
struct anv_image_create_info {
   uint32_t tile_mode;
};

VkResult anv_image_create(VkDevice _device,
                          const VkImageCreateInfo *pCreateInfo,
                          const struct anv_image_create_info *extra,
                          VkImage *pImage);

void anv_image_view_init(struct anv_surface_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer);

void anv_color_attachment_view_init(struct anv_surface_view *view,
                                    struct anv_device *device,
                                    const VkColorAttachmentViewCreateInfo* pCreateInfo,
                                    struct anv_cmd_buffer *cmd_buffer);

/* anv_object destructor for surface views. */
void anv_surface_view_destroy(struct anv_device *device,
                              struct anv_object *obj, VkObjectType obj_type);

/* A VkSampler: pre-packed SAMPLER_STATE dwords. */
struct anv_sampler {
   uint32_t state[4];
};
858
/* Depth/stencil attachment: separate depth and stencil surfaces within one
 * BO. */
struct anv_depth_stencil_view {
   struct anv_bo *bo;

   uint32_t depth_offset;
   uint32_t depth_stride;
   uint32_t depth_format;   /* hardware depth format enum value */

   uint32_t stencil_offset;
   uint32_t stencil_stride;
};

struct anv_framebuffer {
   struct anv_object base;
   uint32_t color_attachment_count;
   const struct anv_surface_view *color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view *depth_stencil;

   uint32_t sample_count;
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   /* Viewport for clears */
   VkDynamicVpState vp_state;
};

/* Per-attachment clear info for a render pass. */
struct anv_render_pass_layer {
   VkAttachmentLoadOp color_load_op;
   VkClearColor clear_color;
};

struct anv_render_pass {
   VkRect render_area;

   uint32_t num_clear_layers;   /* layers with color_load_op == CLEAR */
   uint32_t num_layers;
   struct anv_render_pass_layer layers[0];
};

void anv_device_init_meta(struct anv_device *device);
void anv_device_finish_meta(struct anv_device *device);

/* Emit the load-op clears for `pass` into the command buffer. */
void
anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_render_pass *pass);

/* Returns the driver entry point with the given name, or NULL. */
void *
anv_lookup_entrypoint(const char *name);

#ifdef __cplusplus
}
#endif