/* vk: Create a minimal context for the compiler
 * [mesa.git] / src / vulkan / private.h
 */
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #include "brw_device_info.h"
34 #include "util/macros.h"
35
36 #define VK_PROTOTYPES
37 #include <vulkan/vulkan.h>
38 #include <vulkan/vulkan_intel.h>
39 #include <vulkan/vk_wsi_lunarg.h>
40
41 #include "entrypoints.h"
42
43 #include "brw_context.h"
44
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48
/* Round v up to the next multiple of a. The alignment a must be a
 * power of two (the mask trick below is wrong for other values);
 * presumably all callers guarantee this — confirm.
 */
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
54
/* Signed variant of ALIGN_U32: round v up to the next multiple of a,
 * where a is a power of two. Negative values round toward zero's side
 * of the next boundary (e.g. -5 aligned to 4 is -4).
 */
static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   int32_t mask = a - 1;

   return (v + mask) & ~mask;
}
60
/* Iterate b over the set bit positions of dword, lowest first.
 * __builtin_ffs returns the 1-based index of the lowest set bit (0 when
 * none), so subtracting 1 gives the bit position; the loop exits once
 * the remaining word is zero. The shift uses 1u: with a plain int,
 * 1 << 31 is undefined behavior (left-shifting into the sign bit).
 */
#define for_each_bit(b, dword)                                     \
   for (uint32_t __dword = (dword);                                \
        (b) = __builtin_ffs(__dword) - 1, __dword;                 \
        __dword &= ~(1u << (b)))
65
/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

/* Common prefix of every extensible Vulkan input struct: lets generic
 * code read the sType/pNext chain without knowing the concrete type. */
struct anv_common {
   VkStructureType sType;
   const void* pNext;
};
73
74 /* Whenever we generate an error, pass it through this function. Useful for
75 * debugging, where we can break on it. Only call at error site, not when
76 * propagating errors. Might be useful to plug in a stack trace here.
77 */
78
79 static inline VkResult
80 vk_error(VkResult error)
81 {
82 #ifdef DEBUG
83 fprintf(stderr, "vk_error: %x\n", error);
84 #endif
85
86 return error;
87 }
88
void __anv_finishme(const char *file, int line, const char *format, ...);

/**
 * Print a FINISHME message, including its source location.
 *
 * The expansion has no trailing semicolon: callers supply their own, so
 * the macro behaves like a single statement. The original trailing ';'
 * produced an empty extra statement, which breaks uses such as
 * `if (cond) anv_finishme(...); else ...`.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
96
/* Scaffolding for unimplemented entry points: log a FINISHME for the
 * current function, then return the given value. */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

/* Void-function variant of stub_return. NOTE(review): the v parameter
 * is accepted but never used — presumably this was meant to be stub()
 * with no parameter; confirm call sites before changing. */
#define stub(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
108
109 /**
110 * A dynamically growable, circular buffer. Elements are added at head and
111 * removed from tail. head and tail are free-running uint32_t indices and we
112 * only compute the modulo with size when accessing the array. This way,
113 * number of bytes in the queue is always head - tail, even in case of
114 * wraparound.
115 */
116
/* Growable circular byte buffer. head and tail are free-running byte
 * counters; only accesses into data[] apply the modulo, so the number
 * of queued bytes is always head - tail, even across wraparound. */
struct anv_vector {
   uint32_t head;
   uint32_t tail;
   uint32_t element_size;
   uint32_t size;
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

/* Number of elements currently in the queue. */
static inline int
anv_vector_length(struct anv_vector *queue)
{
   uint32_t used_bytes = queue->head - queue->tail;

   return used_bytes / queue->element_size;
}
134
/* Release the storage owned by the vector. head/tail are not reset;
 * the vector must be re-initialized with anv_vector_init before reuse. */
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}
140
/* Iterate elem over the queued elements, oldest first. The offset is
 * masked with size - 1, so queue->size must be a power of two. Relies
 * on GCC extensions: __builtin_types_compatible_p to reject non-vector
 * arguments at compile time, and arithmetic on the void* data pointer. */
#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
146
/* A GEM buffer object and the driver-side bookkeeping for it. */
struct anv_bo {
   int gem_handle;      /* kernel GEM handle */
   uint32_t index;      /* NOTE(review): presumably the execbuf-array slot — confirm */
   uint64_t offset;
   uint64_t size;

   /* This field is here for the benefit of the aub dumper. It can (and for
    * userptr bos it must) be set to the cpu map of the buffer. Destroying
    * the bo won't clean up the mmap, it's still the responsibility of the bo
    * user to do that. */
   void *map;
};
159
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   /* Aliased 64-bit view so offset+count can be swapped as one unit. */
   uint64_t u64;
};

/* Offset 1 marks the empty list (an impossible element offset); the
 * change-count starts at 0. */
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
175
/* Pool of fixed-size blocks carved out of a single BO. */
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;    /* backing buffer object */
   void *map;           /* CPU mapping of the pool */
   int fd;
   uint32_t size;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;          /* size of every block, in bytes */

   uint32_t next_block;          /* offset of the next never-used block */
   union anv_free_list free_list; /* recycled blocks */
};

/* next/end pair that can be read or updated as a single 64-bit value. */
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

/* Handle to a sub-allocation: its offset within the pool plus the
 * corresponding CPU pointer. */
struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};

/* Free list for states of one fixed size (one bucket of a state pool). */
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

/* State sizes handled by the state pool, as powers of two.
 * NOTE(review): ANV_STATE_BUCKETS is MAX - MIN = 4; covering 2^6..2^10
 * inclusive would need MAX - MIN + 1 buckets — presumably MAX is
 * exclusive here; confirm against the allocator implementation. */
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

/* Variable-size state allocator: one fixed-size bucket per power of
 * two, all sub-allocating from the same block pool. */
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

/* Linear sub-allocator over a block pool. */
struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;           /* next free offset within the current block */
   uint32_t current_block;
   uint32_t end;            /* end of the current block */
};
234
/* Block pool: allocate/free fixed-size blocks by pool offset. */
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_init_slave(struct anv_block_pool *pool,
                               struct anv_block_pool *master_pool,
                               uint32_t num_blocks);
void anv_block_pool_finish(struct anv_block_pool *pool);
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
/* State pool: variable-size allocations out of a block pool. */
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
/* State stream: linear allocations out of a block pool. */
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
253
/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   uint32_t bo_size;    /* size of every BO handed out by this pool */

   void *free_list;
};

void anv_bo_pool_init(struct anv_bo_pool *pool,
                      struct anv_device *device, uint32_t block_size);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
271
struct anv_object;
struct anv_device;

/* Destructor callback stored in every anv_object; invoked when the
 * object is destroyed through the common destruction path. */
typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

/* Base struct for driver objects that need a "virtual" destructor. */
struct anv_object {
   anv_object_destructor_cb destructor;
};

struct anv_physical_device {
   struct anv_instance * instance;
   uint32_t chipset_id;   /* presumably the PCI device id — confirm */
   bool no_hw;            /* running without real hardware */
   const char * path;     /* DRM device node path */
   const char * name;
   const struct brw_device_info * info;
};

struct anv_instance {
   void * pAllocUserData;
   PFN_vkAllocFunction pfnAlloc;   /* application-supplied allocator */
   PFN_vkFreeFunction pfnFree;
   uint32_t apiVersion;
   uint32_t physicalDeviceCount;
   struct anv_physical_device physicalDevice;   /* only one GPU supported */
};
300
/* Pipelines and dynamic state owned by the driver-internal "meta"
 * operations (clear and blit). */
struct anv_meta_state {
   struct {
      VkPipeline pipeline;
   } clear;

   struct {
      VkPipeline pipeline;
      VkDescriptorSetLayout ds_layout;
   } blit;

   /* Dynamic state shared by the meta operations. */
   struct {
      VkDynamicRsState rs_state;
      VkDynamicCbState cb_state;
      VkDynamicDsState ds_state;
   } shared;
};
317
/* The logical device: owns the kernel GPU context, the allocator pools
 * and the shader compiler. */
struct anv_device {
   struct anv_instance * instance;
   uint32_t chipset_id;
   struct brw_device_info info;
   int context_id;      /* kernel GPU context (see anv_gem_create_context) */
   int fd;              /* DRM device fd */
   bool no_hw;          /* don't actually talk to hardware */
   bool dump_aub;       /* capture an AUB trace via aub_writer */

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_block_pool surface_state_block_pool;
   struct anv_block_pool binding_table_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_meta_state meta_state;

   /* Pre-allocated border-color states (float and uint32 variants). */
   struct anv_state float_border_colors;
   struct anv_state uint32_border_colors;

   struct anv_compiler * compiler;
   struct anv_aub_writer * aub_writer;
   /* NOTE(review): presumably guards the shared pools above — confirm
    * the exact locking scope in device.c. */
   pthread_mutex_t mutex;
};
346
/* A VkQueue: tracks batch completion via monotonically increasing
 * serial numbers. */
struct anv_queue {
   struct anv_device * device;

   struct anv_state_pool * pool;

   /**
    * Serial number of the most recently completed batch executed on the
    * engine.
    */
   struct anv_state completed_serial;

   /**
    * The next batch submitted to the engine will be assigned this serial
    * number.
    */
   uint32_t next_serial;

   uint32_t last_collected_serial;
};
366
/* Device-level allocation helpers. NOTE(review): presumably dispatch to
 * the instance's pfnAlloc/pfnFree callbacks — confirm in device.c. */
void *
anv_device_alloc(struct anv_device * device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType);

void
anv_device_free(struct anv_device * device,
                void * mem);
376
377 void* anv_gem_mmap(struct anv_device *device,
378 uint32_t gem_handle, uint64_t offset, uint64_t size);
379 void anv_gem_munmap(void *p, uint64_t size);
380 uint32_t anv_gem_create(struct anv_device *device, size_t size);
381 void anv_gem_close(struct anv_device *device, int gem_handle);
382 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
383 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
384 int anv_gem_execbuffer(struct anv_device *device,
385 struct drm_i915_gem_execbuffer2 *execbuf);
386 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
387 uint32_t stride, uint32_t tiling);
388 int anv_gem_create_context(struct anv_device *device);
389 int anv_gem_destroy_context(struct anv_device *device, int context);
390 int anv_gem_get_param(int fd, uint32_t param);
391 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
392 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
393 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
394 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
395
396 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
397
/* Growable array of kernel relocation entries plus, in parallel, the
 * target BO of each entry (reloc_bos[i] corresponds to relocs[i]). */
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;   /* allocated capacity of both arrays */
   struct drm_i915_gem_relocation_entry * relocs;
   struct anv_bo ** reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             struct anv_device *device);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           struct anv_device *device);
409
/* One BO's worth of batch commands; batches chain backward through
 * prev_batch_bo as they grow. */
struct anv_batch_bo {
   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   /* These offsets reference the per-batch reloc list */
   size_t first_reloc;
   size_t num_relocs;

   struct anv_batch_bo * prev_batch_bo;
};

/* An in-progress command stream: a [start, end) buffer with a write
 * cursor (next) and the relocations emitted into it. */
struct anv_batch {
   struct anv_device * device;

   void * start;
   void * end;
   void * next;     /* current write position; start <= next <= end */

   struct anv_reloc_list relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void * user_data;
};
438
/* Reserve num_dwords in the batch and return the write pointer. */
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
/* Append the contents of 'other' to 'batch'. */
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
/* Record a relocation at 'location' targeting bo+offset; returns the
 * value to write there. */
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);

/* A GPU address expressed as BO plus byte offset, resolved to a real
 * address via relocation. */
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};
448
449 #define __gen_address_type struct anv_address
450 #define __gen_user_data struct anv_batch
451
452 static inline uint64_t
453 __gen_combine_address(struct anv_batch *batch, void *location,
454 const struct anv_address address, uint32_t delta)
455 {
456 if (address.bo == NULL) {
457 return delta;
458 } else {
459 assert(batch->start <= location && location < batch->end);
460
461 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
462 }
463 }
464
465 #include "gen7_pack.h"
466 #include "gen75_pack.h"
467 #undef GEN8_3DSTATE_MULTISAMPLE
468 #include "gen8_pack.h"
469
/* Pack one command struct and emit it into the batch. 'cmd' is a
 * GEN*_* command name; its _header, _length and _pack companions come
 * from the gen*_pack headers. Macro arguments are parenthesized at
 * each use so expression arguments expand safely. */
#define anv_batch_emit(batch, cmd, ...) do {                         \
      struct cmd __template = {                                      \
         cmd ## _header,                                             \
         __VA_ARGS__                                                 \
      };                                                             \
      void *__dst = anv_batch_emit_dwords((batch), cmd ## _length);  \
      cmd ## _pack((batch), __dst, &__template);                     \
   } while (0)

/* Variable-length variant: emits n dwords, setting DwordLength from n,
 * and evaluates to the destination pointer (GCC statement expression).
 * (n) is parenthesized — the original `n - cmd##_length_bias` mis-parsed
 * for operands of lower precedence than '-'. */
#define anv_batch_emitn(batch, n, cmd, ...) ({                       \
      struct cmd __template = {                                      \
         cmd ## _header,                                             \
         .DwordLength = (n) - cmd ## _length_bias,                   \
         __VA_ARGS__                                                 \
      };                                                             \
      void *__dst = anv_batch_emit_dwords((batch), (n));             \
      cmd ## _pack((batch), __dst, &__template);                     \
      __dst;                                                         \
   })
489
/* Emit the bitwise OR of two equal-length pre-packed dword arrays into
 * the batch. Both arguments must be actual arrays (ARRAY_SIZE is used),
 * not pointers. */
#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));               \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
   } while (0)
499
/* Default GEN8 memory object control state: write-back LLC/eLLC
 * cacheability, target cache deferred to PAT, LRU age 0. */
#define GEN8_MOCS { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
   .AgeforQUADLRU = 0 \
}
505
/* A VkDeviceMemory allocation: one BO plus its CPU mapping. */
struct anv_device_memory {
   struct anv_bo bo;
   VkDeviceSize map_size;   /* size of the mapping below */
   void * map;
};

/* Dynamic viewport state, stored as GPU-visible states. */
struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};

/* Pre-packed dwords for the dynamic half of the raster state. */
struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

/* Pre-packed dwords for dynamic depth/stencil state. */
struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

/* Pre-packed dwords for dynamic color-blend state. */
struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];

};
533
/* Location of one descriptor within a stage's binding tables.
 * NOTE(review): dynamic_slot is int8_t — presumably a negative value
 * marks "not a dynamic buffer"; confirm against the descriptor code. */
struct anv_descriptor_slot {
   int8_t dynamic_slot;
   uint8_t index;
};

struct anv_descriptor_set_layout {
   /* Per-stage views into entries[] below. */
   struct {
      uint32_t surface_count;
      struct anv_descriptor_slot *surface_start;
      uint32_t sampler_count;
      struct anv_descriptor_slot *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;               /* number of entries[] */
   uint32_t num_dynamic_buffers;
   uint32_t shader_stages;       /* bitmask of stages that use this layout */
   struct anv_descriptor_slot entries[0];   /* trailing variable-length array */
};

/* One bound descriptor: a sampler, an image/buffer view, or both. */
struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];   /* sized by the set layout */
};
561
#define MAX_VBS 32    /* max vertex buffer bindings */
#define MAX_SETS 8    /* max simultaneously bound descriptor sets */
#define MAX_RTS 8     /* max color render targets */

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      /* Where this set's surfaces/samplers begin within each stage's
       * flattened binding table. */
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   /* Per-stage totals across all sets. */
   struct {
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};

struct anv_buffer {
   struct anv_device * device;
   VkDeviceSize size;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;
};
589
/* Dirty-state bits for anv_cmd_buffer::dirty.
 * NOTE(review): bit 1 is skipped — presumably reserved or left over
 * from a removed flag; confirm before reusing it. */
#define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
#define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)

/* One bound vertex buffer. */
struct anv_vertex_binding {
   struct anv_buffer * buffer;
   VkDeviceSize offset;
};

/* One bound descriptor set plus its dynamic buffer offsets. */
struct anv_descriptor_set_binding {
   struct anv_descriptor_set * set;
   uint32_t dynamic_offsets[128];
};
604
/* A command buffer: the batch being recorded plus the execbuf
 * bookkeeping needed to submit it. */
struct anv_cmd_buffer {
   struct anv_object base;
   struct anv_device * device;

   /* Kernel submission state (execbuf2 arguments and the BO list). */
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 * exec2_objects;
   struct anv_bo ** exec2_bos;
   uint32_t exec2_array_length;
   bool need_reloc;
   uint32_t serial;

   uint32_t bo_count;
   struct anv_batch batch;
   struct anv_batch_bo * last_batch_bo;
   struct anv_batch_bo * surface_batch_bo;
   uint32_t surface_next;
   struct anv_reloc_list surface_relocs;
   struct anv_state_stream binding_table_state_stream;
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t vb_dirty;          /* bitmask of dirty vertex-buffer bindings */
   uint32_t dirty;             /* ANV_CMD_BUFFER_*_DIRTY flags */
   uint32_t descriptors_dirty;
   struct anv_pipeline * pipeline;
   struct anv_framebuffer * framebuffer;
   struct anv_dynamic_rs_state * rs_state;
   struct anv_dynamic_ds_state * ds_state;
   struct anv_dynamic_vp_state * vp_state;
   struct anv_dynamic_cb_state * cb_state;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set_binding descriptors[MAX_SETS];
};

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
void anv_aub_writer_destroy(struct anv_aub_writer *writer);
642
/* A fence backed by its own tiny BO and execbuf.
 * NOTE(review): presumably submitted after the real work so waiting on
 * this BO waits for everything before it — confirm in device.c. */
struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

/* A shader module: length-prefixed raw bytes. */
struct anv_shader {
   uint32_t size;
   char data[0];   /* 'size' bytes of shader data follow */
};
655
/* A compiled graphics pipeline: per-stage program data from the
 * compiler plus pre-packed state commands replayed at bind time. */
struct anv_pipeline {
   struct anv_object base;
   struct anv_device * device;
   struct anv_batch batch;        /* pre-baked state commands */
   uint32_t batch_data[256];      /* storage backing 'batch' */
   struct anv_shader * shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout * layout;
   bool use_repclear;

   /* Compiler output, per stage. */
   struct brw_vs_prog_data vs_prog_data;
   struct brw_wm_prog_data wm_prog_data;
   struct brw_gs_prog_data gs_prog_data;
   struct brw_stage_prog_data * prog_data[VK_NUM_SHADER_STAGE];

   /* URB space partitioning between VS and GS. */
   struct {
      uint32_t vs_start;
      uint32_t vs_size;
      uint32_t nr_vs_entries;
      uint32_t gs_start;
      uint32_t gs_size;
      uint32_t nr_gs_entries;
   } urb;

   struct anv_bo vs_scratch_bo;
   struct anv_bo ps_scratch_bo;
   struct anv_bo gs_scratch_bo;

   uint32_t active_stages;        /* bitmask of enabled shader stages */
   struct anv_state_stream program_stream;
   struct anv_state blend_state;
   /* Kernel offsets within program_stream; presumably NO_KERNEL when a
    * variant is absent (see NO_KERNEL above) — confirm. */
   uint32_t vs_simd8;
   uint32_t ps_simd8;
   uint32_t ps_simd16;
   uint32_t gs_vec4;
   uint32_t gs_vertex_count;

   uint32_t vb_used;              /* bitmask of vertex buffers referenced */
   uint32_t binding_stride[MAX_VBS];

   /* Pre-packed dwords merged with the dynamic-state dwords. */
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
};
698
/* Extra, non-Vulkan pipeline options; presumably used by the internal
 * meta (clear/blit) pipelines — confirm at call sites. */
struct anv_pipeline_create_info {
   bool use_repclear;
   bool disable_viewport;
   bool disable_scissor;
   bool disable_vs;
   bool use_rectlist;
};

VkResult
anv_pipeline_create(VkDevice device,
                    const VkGraphicsPipelineCreateInfo *pCreateInfo,
                    const struct anv_pipeline_create_info *extra,
                    VkPipeline *pPipeline);

/* Shader compiler front-end (wraps the brw compiler). */
struct anv_compiler *anv_compiler_create(struct anv_device *device);
void anv_compiler_destroy(struct anv_compiler *compiler);
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
void anv_compiler_free(struct anv_pipeline *pipeline);
717
/* Driver-internal description of a VkFormat. */
struct anv_format {
   const char * name;
   uint16_t format;     /* hardware surface format value */
   uint8_t cpp;         /* bytes per pixel */
   uint8_t channels;
   bool has_stencil;
};

/* Look up the driver format entry for a VkFormat. */
const struct anv_format *
anv_format_for_vk_format(VkFormat format);
728
struct anv_image {
   VkImageType type;
   VkExtent3D extent;
   VkFormat format;
   uint32_t tile_mode;
   VkDeviceSize size;      /* total surface size in bytes */
   uint32_t alignment;
   uint32_t stride;        /* row pitch in bytes */

   /* Separate stencil surface, for formats that have one. */
   uint32_t stencil_offset;
   uint32_t stencil_stride;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;

   struct anv_swap_chain * swap_chain;   /* non-NULL for WSI-owned images */
};

/* A view onto a surface: the packed SURFACE_STATE plus the BO range it
 * references. */
struct anv_surface_view {
   struct anv_state surface_state;
   struct anv_bo * bo;
   uint32_t offset;
   uint32_t range;
   VkExtent3D extent;
   VkFormat format;
};

/* Extra non-Vulkan options for internal image creation. */
struct anv_image_create_info {
   uint32_t tile_mode;
};

VkResult anv_image_create(VkDevice _device,
                          const VkImageCreateInfo *pCreateInfo,
                          const struct anv_image_create_info *extra,
                          VkImage *pImage);

/* cmd_buffer may be NULL-able or required — confirm at call sites;
 * presumably it selects where the surface state is allocated. */
void anv_image_view_init(struct anv_surface_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer);

void anv_color_attachment_view_init(struct anv_surface_view *view,
                                    struct anv_device *device,
                                    const VkColorAttachmentViewCreateInfo* pCreateInfo,
                                    struct anv_cmd_buffer *cmd_buffer);

/* Packed sampler state dwords. */
struct anv_sampler {
   uint32_t state[4];
};
779
/* Depth/stencil attachment: the two surfaces within one BO. */
struct anv_depth_stencil_view {
   struct anv_bo * bo;

   uint32_t depth_offset;    /* byte offset of the depth surface in bo */
   uint32_t depth_stride;
   uint32_t depth_format;

   uint32_t stencil_offset;
   uint32_t stencil_stride;
};

struct anv_framebuffer {
   struct anv_object base;
   uint32_t color_attachment_count;
   const struct anv_surface_view * color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view * depth_stencil;

   uint32_t sample_count;
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   /* Viewport for clears */
   VkDynamicVpState vp_state;
};

/* Per-attachment clear information for a render pass. */
struct anv_render_pass_layer {
   VkAttachmentLoadOp color_load_op;
   VkClearColor clear_color;
};

struct anv_render_pass {
   VkRect render_area;

   uint32_t num_clear_layers;   /* layers whose load op requires a clear */
   uint32_t num_layers;
   struct anv_render_pass_layer layers[0];   /* trailing variable-length array */
};

/* Build the meta (clear/blit) pipelines and shared dynamic state. */
void anv_device_init_meta(struct anv_device *device);

/* Clear the pass's attachments; presumably uses the meta clear
 * pipeline — confirm in meta.c. */
void
anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_render_pass *pass);

/* Resolve a Vulkan entrypoint name to its driver function. */
void *
anv_lookup_entrypoint(const char *name);
827
828 #ifdef __cplusplus
829 }
830 #endif