vk/meta: Clean up temporary objects
[mesa.git] / src / vulkan / private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #include "brw_device_info.h"
34 #include "util/macros.h"
35
36 #define VK_PROTOTYPES
37 #include <vulkan/vulkan.h>
38 #include <vulkan/vulkan_intel.h>
39 #include <vulkan/vk_wsi_lunarg.h>
40
41 #include "entrypoints.h"
42
43 #include "brw_context.h"
44
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48
/* Round v up to the next multiple of a.  a must be a power of two. */
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
54
/* Signed variant of ALIGN_U32: round v up to the next multiple of a,
 * where a is a power of two. */
static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   const int32_t mask = a - 1;
   return (v + mask) & ~mask;
}
60
/* Iterate b over the indices of the set bits in dword, lowest first.
 * __builtin_ffs returns 1 + the index of the least significant set bit
 * (0 for no bits), so b is the bit index and the loop terminates once
 * __dword reaches zero.
 *
 * The clear mask uses 1u: with a plain (signed) 1, clearing bit 31 would
 * evaluate 1 << 31, which overflows int and is undefined behavior. */
#define for_each_bit(b, dword)                                  \
   for (uint32_t __dword = (dword);                             \
        (b) = __builtin_ffs(__dword) - 1, __dword;              \
        __dword &= ~(1u << (b)))
65
/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

/* Mirrors the leading two members shared by the Vulkan *CreateInfo
 * structures, so a pNext chain can be walked without knowing the
 * concrete type. */
struct anv_common {
   VkStructureType sType;
   const void* pNext;
};
73
74 /* Whenever we generate an error, pass it through this function. Useful for
75 * debugging, where we can break on it. Only call at error site, not when
76 * propagating errors. Might be useful to plug in a stack trace here.
77 */
78
79 static inline VkResult
80 vk_error(VkResult error)
81 {
82 #ifdef DEBUG
83 fprintf(stderr, "vk_error: %x\n", error);
84 #endif
85
86 return error;
87 }
88
void __anv_finishme(const char *file, int line, const char *format, ...);

/**
 * Print a FINISHME message, including its source location.
 *
 * The expansion deliberately has no trailing semicolon: the caller writes
 * one.  The previous definition ended in ";", so every use expanded to a
 * double semicolon and `if (x) anv_finishme(...); else ...` failed to
 * compile (the stray empty statement detaches the else).
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
96
/* Log an unimplemented-entrypoint message and return v from the caller. */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

/* Log an unimplemented-entrypoint message and return from a void caller.
 * NOTE(review): the parameter v is never used in the expansion — it looks
 * like this was meant to be stub() with no parameter; confirm against the
 * call sites before changing the arity. */
#define stub(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
108
/**
 * A dynamically growable circular buffer.  Elements are appended at head
 * and consumed from tail.  Both indices are free-running uint32_t byte
 * offsets; the modulo with size is taken only when the array is actually
 * accessed.  That way head - tail is always the number of queued bytes,
 * even across wraparound.
 */
struct anv_vector {
   uint32_t head;
   uint32_t tail;
   uint32_t element_size;
   uint32_t size;
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

/* Number of elements currently in the queue. */
static inline int
anv_vector_length(struct anv_vector *queue)
{
   const uint32_t bytes_queued = queue->head - queue->tail;
   return bytes_queued / queue->element_size;
}

/* Free the backing storage.  The vector must not be used afterwards. */
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}
140
/* Iterate elem over every queued element, oldest first.  The comma
 * expression assigns elem from the masked offset before the < head test is
 * evaluated, so elem is valid inside the body on every iteration.  Masking
 * with size - 1 (rather than %) means size must be a power of two.  The
 * static_assert rejects anything that is not a struct anv_vector *. */
#define anv_vector_foreach(elem, queue)                                  \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail;                                \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
146
/* A GEM buffer object together with the bookkeeping the driver keeps for
 * it.  index and offset presumably relate to execbuf relocation state —
 * confirm against the submission code. */
struct anv_bo {
   int gem_handle;
   uint32_t index;
   uint64_t offset;
   uint64_t size;

   /* This field is here for the benefit of the aub dumper.  It can (and for
    * userptr bos it must) be set to the cpu map of the buffer.  Destroying
    * the bo won't clean up the mmap, it's still the responsibility of the bo
    * user to do that. */
   void *map;
};
159
/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.  Instead the
 * head is paired with a generation count and the two are swapped together
 * as one 64-bit compare-and-exchange via u64. */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

/* offset == 1 marks the empty list — presumably 1 can never be a real
 * entry offset; confirm against the pool allocators. */
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
175
/* A pool of fixed-size blocks carved out of a single BO.  Blocks are
 * handed out sequentially via next_block and recycled through the
 * lock-free free_list. */
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;
   void *map;
   int fd;
   uint32_t size;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;

   uint32_t next_block;
   union anv_free_list free_list;
};
195
/* next/end allocation cursor for a block, overlaid on a u64 so both can be
 * updated in a single 64-bit operation. */
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

/* A sub-allocation from a block pool: its offset in the pool, the size it
 * was rounded to, and a CPU pointer to it. */
struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};

/* Free list + current block for one fixed allocation size (one bucket of
 * an anv_state_pool). */
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

/* Bucket sizes run from 2^6 = 64 bytes up to 2^10 = 1024 bytes. */
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

/* Power-of-two-bucketed state allocator layered on a block pool. */
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

/* Linear (bump) allocator over blocks from a block pool; freed all at once
 * via anv_state_stream_finish. */
struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;
   uint32_t current_block;
   uint32_t end;
};
234
235 void anv_block_pool_init(struct anv_block_pool *pool,
236 struct anv_device *device, uint32_t block_size);
237 void anv_block_pool_finish(struct anv_block_pool *pool);
238 uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
239 void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
240 void anv_state_pool_init(struct anv_state_pool *pool,
241 struct anv_block_pool *block_pool);
242 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
243 size_t state_size, size_t alignment);
244 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
245 void anv_state_stream_init(struct anv_state_stream *stream,
246 struct anv_block_pool *block_pool);
247 void anv_state_stream_finish(struct anv_state_stream *stream);
248 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
249 uint32_t size, uint32_t alignment);
250
/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   /* Size of each BO handed out by the pool. */
   uint32_t bo_size;

   /* Head of the list of free BOs; NULL when empty. */
   void *free_list;
};
262
263 void anv_bo_pool_init(struct anv_bo_pool *pool,
264 struct anv_device *device, uint32_t block_size);
265 void anv_bo_pool_finish(struct anv_bo_pool *pool);
266 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
267 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
268
struct anv_object;
struct anv_device;

/* Destructor hook invoked when the API object is destroyed. */
typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

/* Common header embedded at the start of destructible driver objects so
 * generic destruction can dispatch through the callback. */
struct anv_object {
   anv_object_destructor_cb destructor;
};
279
/* A single enumerated GPU: its DRM device path, marketing name and the
 * brw_device_info table describing the hardware generation. */
struct anv_physical_device {
   struct anv_instance *          instance;
   uint32_t                       chipset_id;
   bool                           no_hw;   /* true when running without real hardware */
   const char *                   path;
   const char *                   name;
   const struct brw_device_info * info;
};

/* Per-VkInstance state: the application's allocation callbacks and the
 * (single) physical device. */
struct anv_instance {
   void *                       pAllocUserData;
   PFN_vkAllocFunction          pfnAlloc;
   PFN_vkFreeFunction           pfnFree;
   uint32_t                     apiVersion;
   uint32_t                     physicalDeviceCount;
   struct anv_physical_device   physicalDevice;
};
297
/* Pipelines and dynamic state objects used internally by the meta
 * operations (clears and blits). */
struct anv_meta_state {
   struct {
      VkPipeline pipeline;
   } clear;

   struct {
      VkPipeline pipeline;
      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } blit;

   /* Dynamic state shared by the clear and blit paths. */
   struct {
      VkDynamicRsState rs_state;
      VkDynamicCbState cb_state;
      VkDynamicDsState ds_state;
   } shared;
};
315
/* Per-VkDevice state: the DRM fd and context, the memory pools backing
 * batches and GPU state, meta-op state, and the shader compiler. */
struct anv_device {
   struct anv_instance *       instance;
   uint32_t                    chipset_id;
   struct brw_device_info      info;
   int                         context_id;
   int                         fd;
   bool                        no_hw;
   bool                        dump_aub;

   /* Pool of BOs used for batch buffers. */
   struct anv_bo_pool          batch_bo_pool;

   struct anv_block_pool       dynamic_state_block_pool;
   struct anv_state_pool       dynamic_state_pool;

   struct anv_block_pool       instruction_block_pool;
   struct anv_block_pool       surface_state_block_pool;
   struct anv_state_pool       surface_state_pool;

   struct anv_meta_state       meta_state;

   /* Pre-baked border color tables for samplers. */
   struct anv_state            float_border_colors;
   struct anv_state            uint32_border_colors;

   struct anv_compiler *       compiler;
   struct anv_aub_writer *     aub_writer;
   pthread_mutex_t             mutex;
};
343
/* A VkQueue: tracks batch serial numbers so completion can be observed. */
struct anv_queue {
   struct anv_device *         device;

   struct anv_state_pool *     pool;

   /**
    * Serial number of the most recently completed batch executed on the
    * engine.
    */
   struct anv_state            completed_serial;

   /**
    * The next batch submitted to the engine will be assigned this serial
    * number.
    */
   uint32_t                    next_serial;

   uint32_t                    last_collected_serial;
};
363
364 void *
365 anv_device_alloc(struct anv_device * device,
366 size_t size,
367 size_t alignment,
368 VkSystemAllocType allocType);
369
370 void
371 anv_device_free(struct anv_device * device,
372 void * mem);
373
374 void* anv_gem_mmap(struct anv_device *device,
375 uint32_t gem_handle, uint64_t offset, uint64_t size);
376 void anv_gem_munmap(void *p, uint64_t size);
377 uint32_t anv_gem_create(struct anv_device *device, size_t size);
378 void anv_gem_close(struct anv_device *device, int gem_handle);
379 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
380 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
381 int anv_gem_execbuffer(struct anv_device *device,
382 struct drm_i915_gem_execbuffer2 *execbuf);
383 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
384 uint32_t stride, uint32_t tiling);
385 int anv_gem_create_context(struct anv_device *device);
386 int anv_gem_destroy_context(struct anv_device *device, int context);
387 int anv_gem_get_param(int fd, uint32_t param);
388 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
389 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
390 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
391 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
392
393 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
394
/* Growable array of kernel relocation entries plus, in parallel, the BO
 * each relocation targets.  array_length is the allocated capacity;
 * num_relocs is the number in use. */
struct anv_reloc_list {
   size_t                                       num_relocs;
   size_t                                       array_length;
   struct drm_i915_gem_relocation_entry *       relocs;
   struct anv_bo **                             reloc_bos;
};
401
402 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
403 struct anv_device *device);
404 void anv_reloc_list_finish(struct anv_reloc_list *list,
405 struct anv_device *device);
406
/* One BO's worth of batch; batch BOs form a singly linked list through
 * prev_batch_bo as the batch grows. */
struct anv_batch_bo {
   struct anv_bo                               bo;

   /* Bytes actually consumed in this batch BO */
   size_t                                      length;

   /* These offsets reference the per-batch reloc list */
   size_t                                      first_reloc;
   size_t                                      num_relocs;

   struct anv_batch_bo *                       prev_batch_bo;
};

/* A command stream under construction: the [start, end) window currently
 * being written, the write cursor, and the relocations emitted so far. */
struct anv_batch {
   struct anv_device *                         device;

   void *                                      start;
   void *                                      end;
   void *                                      next;

   struct anv_reloc_list                       relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *                                      user_data;
};
435
436 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
437 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
438 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
439 void *location, struct anv_bo *bo, uint32_t offset);
440
441 struct anv_address {
442 struct anv_bo *bo;
443 uint32_t offset;
444 };
445
446 #define __gen_address_type struct anv_address
447 #define __gen_user_data struct anv_batch
448
449 static inline uint64_t
450 __gen_combine_address(struct anv_batch *batch, void *location,
451 const struct anv_address address, uint32_t delta)
452 {
453 if (address.bo == NULL) {
454 return delta;
455 } else {
456 assert(batch->start <= location && location < batch->end);
457
458 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
459 }
460 }
461
462 #include "gen7_pack.h"
463 #include "gen75_pack.h"
464 #undef GEN8_3DSTATE_MULTISAMPLE
465 #include "gen8_pack.h"
466
/* Emit one packed command: build a struct cmd from the command's _header
 * plus the caller's designated initializers, claim cmd_length dwords from
 * the batch, and pack the template into place. */
#define anv_batch_emit(batch, cmd, ...) do {                            \
      struct cmd __template = {                                         \
         cmd ## _header,                                                \
         __VA_ARGS__                                                    \
      };                                                                \
      void *__dst = anv_batch_emit_dwords(batch, cmd ## _length);       \
      cmd ## _pack(batch, __dst, &__template);                          \
   } while (0)

/* Variable-length variant: emit n dwords, patching DwordLength from n and
 * the command's length bias.  Evaluates to a pointer to the emitted
 * dwords (GNU statement expression). */
#define anv_batch_emitn(batch, n, cmd, ...) ({                  \
      struct cmd __template = {                                 \
         cmd ## _header,                                        \
         .DwordLength = n - cmd ## _length_bias,                \
         __VA_ARGS__                                            \
      };                                                        \
      void *__dst = anv_batch_emit_dwords(batch, n);            \
      cmd ## _pack(batch, __dst, &__template);                  \
      __dst;                                                    \
   })

/* Emit the bitwise OR of two pre-packed dword arrays of equal length —
 * used to combine pipeline state with dynamic state. */
#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));               \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
   } while (0)

/* Default Gen8 memory object control state: write-back cacheable, LLC/eLLC
 * target deferred to PAT. */
#define GEN8_MOCS {                                     \
      .MemoryTypeLLCeLLCCacheabilityControl = WB,       \
      .TargetCache = L3DefertoPATforLLCeLLCselection,   \
      .AgeforQUADLRU = 0                                \
   }
502
/* A VkDeviceMemory allocation: its BO and, once mapped, the CPU mapping. */
struct anv_device_memory {
   struct anv_bo                bo;
   VkDeviceSize                 map_size;
   void *                       map;
};

/* Dynamic viewport state: packed viewport/scissor state objects. */
struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};

/* Dynamic raster state, pre-packed so it can be ORed with the pipeline's
 * copies via anv_batch_emit_merge. */
struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

/* Dynamic depth/stencil state, pre-packed. */
struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

/* Dynamic color-blend state, pre-packed. */
struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];

};
530
/* One binding slot in a descriptor set layout; dynamic_slot is presumably
 * an index into the dynamic-offset array, or negative when the binding is
 * not dynamic — confirm against the layout-creation code. */
struct anv_descriptor_slot {
   int8_t dynamic_slot;
   uint8_t index;
};

/* Per-shader-stage views into the flexible entries[] array, plus totals
 * for the whole layout. */
struct anv_descriptor_set_layout {
   struct {
      uint32_t surface_count;
      struct anv_descriptor_slot *surface_start;
      uint32_t sampler_count;
      struct anv_descriptor_slot *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;
   uint32_t num_dynamic_buffers;
   uint32_t shader_stages;
   struct anv_descriptor_slot entries[0];   /* trailing variable-length data */
};

/* A single bound descriptor: a sampler and/or a surface view. */
struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

/* A descriptor set is just its array of descriptors. */
struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];
};
558
/* Driver limits: vertex buffers, bound descriptor sets, render targets. */
#define MAX_VBS  32
#define MAX_SETS  8
#define MAX_RTS   8

/* Flattened view of the descriptor set layouts bound to a pipeline: for
 * each set, where its surfaces/samplers start in each stage's binding
 * table, and per-stage totals. */
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};

/* A VkBuffer; bo/offset are filled in when memory is bound. */
struct anv_buffer {
   struct anv_device *          device;
   VkDeviceSize                 size;

   /* Set when bound */
   struct anv_bo *              bo;
   VkDeviceSize                 offset;
};

/* Command-buffer dirty bits (NOTE(review): bit 1 is unused here —
 * possibly reserved; confirm before reusing it). */
#define ANV_CMD_BUFFER_PIPELINE_DIRTY  (1 << 0)
#define ANV_CMD_BUFFER_RS_DIRTY        (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY        (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY        (1 << 4)

/* One bound vertex buffer. */
struct anv_vertex_binding {
   struct anv_buffer *                          buffer;
   VkDeviceSize                                 offset;
};

/* One bound descriptor set plus its dynamic buffer offsets. */
struct anv_descriptor_set_binding {
   struct anv_descriptor_set *                  set;
   uint32_t                                     dynamic_offsets[128];
};
601
/* A VkCmdBuffer: the execbuf bookkeeping for submission, the batch and
 * surface-state streams being built, and the currently bound state. */
struct anv_cmd_buffer {
   struct anv_object                            base;
   struct anv_device *                          device;

   /* execbuf2 arguments assembled at submit time. */
   struct drm_i915_gem_execbuffer2              execbuf;
   struct drm_i915_gem_exec_object2 *           exec2_objects;
   struct anv_bo **                             exec2_bos;
   uint32_t                                     exec2_array_length;
   bool                                         need_reloc;
   uint32_t                                     serial;

   uint32_t                                     bo_count;
   struct anv_batch                             batch;
   struct anv_batch_bo *                        last_batch_bo;
   struct anv_batch_bo *                        surface_batch_bo;
   uint32_t                                     surface_next;
   struct anv_reloc_list                        surface_relocs;
   struct anv_state_stream                      surface_state_stream;
   struct anv_state_stream                      dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t                                     vb_dirty;
   uint32_t                                     dirty;
   uint32_t                                     descriptors_dirty;
   struct anv_pipeline *                        pipeline;
   struct anv_framebuffer *                     framebuffer;
   struct anv_dynamic_rs_state *                rs_state;
   struct anv_dynamic_ds_state *                ds_state;
   struct anv_dynamic_vp_state *                vp_state;
   struct anv_dynamic_cb_state *                cb_state;
   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
   struct anv_descriptor_set_binding            descriptors[MAX_SETS];
};
635
636 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
637 void anv_aub_writer_destroy(struct anv_aub_writer *writer);
638
/* A VkFence backed by a dummy execbuf whose BO completion is waited on. */
struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

/* A VkShader: the shader source/binary blob as trailing data. */
struct anv_shader {
   uint32_t size;
   char data[0];
};
651
/* A compiled VkPipeline: its pre-baked command stream (batch over
 * batch_data), per-stage compiled programs and prog_data, URB layout,
 * scratch BOs and the pre-packed state that gets merged with dynamic
 * state at draw time. */
struct anv_pipeline {
   struct anv_object                            base;
   struct anv_device *                          device;
   struct anv_batch                             batch;
   uint32_t                                     batch_data[256];
   struct anv_shader *                          shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout *                 layout;
   bool                                         use_repclear;

   struct brw_vs_prog_data                      vs_prog_data;
   struct brw_wm_prog_data                      wm_prog_data;
   struct brw_gs_prog_data                      gs_prog_data;
   struct brw_stage_prog_data *                 prog_data[VK_NUM_SHADER_STAGE];
   /* URB space partitioning between the VS and GS. */
   struct {
      uint32_t                                  vs_start;
      uint32_t                                  vs_size;
      uint32_t                                  nr_vs_entries;
      uint32_t                                  gs_start;
      uint32_t                                  gs_size;
      uint32_t                                  nr_gs_entries;
   } urb;

   struct anv_bo                                vs_scratch_bo;
   struct anv_bo                                ps_scratch_bo;
   struct anv_bo                                gs_scratch_bo;

   uint32_t                                     active_stages;
   struct anv_state_stream                      program_stream;
   struct anv_state                             blend_state;
   uint32_t                                     vs_simd8;
   uint32_t                                     ps_simd8;
   uint32_t                                     ps_simd16;
   uint32_t                                     gs_vec4;
   uint32_t                                     gs_vertex_count;

   uint32_t                                     vb_used;
   uint32_t                                     binding_stride[MAX_VBS];

   /* Pre-packed state, merged with the dynamic copies at draw time. */
   uint32_t                                     state_sf[GEN8_3DSTATE_SF_length];
   uint32_t                                     state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t                                     state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
};

/* Extra, non-API pipeline options used by the meta paths. */
struct anv_pipeline_create_info {
   bool                                         use_repclear;
   bool                                         disable_viewport;
   bool                                         disable_scissor;
   bool                                         disable_vs;
   bool                                         use_rectlist;
};
702
703 VkResult
704 anv_pipeline_create(VkDevice device,
705 const VkGraphicsPipelineCreateInfo *pCreateInfo,
706 const struct anv_pipeline_create_info *extra,
707 VkPipeline *pPipeline);
708
709 struct anv_compiler *anv_compiler_create(struct anv_device *device);
710 void anv_compiler_destroy(struct anv_compiler *compiler);
711 int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
712 void anv_compiler_free(struct anv_pipeline *pipeline);
713
/* One row of the VkFormat -> hardware surface format table. */
struct anv_format {
   const char *                                 name;
   uint16_t                                     format;      /* hardware surface format value */
   uint8_t                                      cpp;         /* bytes per pixel */
   uint8_t                                      channels;
   bool                                         has_stencil;
};
721
722 const struct anv_format *
723 anv_format_for_vk_format(VkFormat format);
724
/* A VkImage: surface layout (tiling, stride, alignment), the optional
 * separate stencil surface, and the BO it is bound to. */
struct anv_image {
   VkImageType                                  type;
   VkExtent3D                                   extent;
   VkFormat                                     format;
   uint32_t                                     tile_mode;
   VkDeviceSize                                 size;
   uint32_t                                     alignment;
   uint32_t                                     stride;

   /* Separate stencil surface, when the format has stencil. */
   uint32_t                                     stencil_offset;
   uint32_t                                     stencil_stride;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;

   struct anv_swap_chain *                      swap_chain;

   /**
    * \name Alignment of miptree images, in units of pixels.
    *
    * These fields contain the actual alignment values, not the values the
    * hardware expects. For example, if h_align is 4, then program the hardware
    * with HALIGN_4.
    *
    * \see RENDER_SURFACE_STATE.SurfaceHorizontalAlignment
    * \see RENDER_SURFACE_STATE.SurfaceVerticalAlignment
    * \{
    */
   uint8_t h_align;
   uint8_t v_align;
   /** \} */
};
758
/* A view onto a buffer or image: the packed RENDER_SURFACE_STATE plus the
 * BO range it describes. */
struct anv_surface_view {
   struct anv_object                            base;

   struct anv_state                             surface_state;
   struct anv_bo *                              bo;
   uint32_t                                     offset;
   uint32_t                                     range;
   VkExtent3D                                   extent;
   VkFormat                                     format;
};

/* Extra, non-API image options (explicit tiling) for internal callers. */
struct anv_image_create_info {
   uint32_t tile_mode;
};
773
774 VkResult anv_image_create(VkDevice _device,
775 const VkImageCreateInfo *pCreateInfo,
776 const struct anv_image_create_info *extra,
777 VkImage *pImage);
778
779 void anv_image_view_init(struct anv_surface_view *view,
780 struct anv_device *device,
781 const VkImageViewCreateInfo* pCreateInfo,
782 struct anv_cmd_buffer *cmd_buffer);
783
784 void anv_color_attachment_view_init(struct anv_surface_view *view,
785 struct anv_device *device,
786 const VkColorAttachmentViewCreateInfo* pCreateInfo,
787 struct anv_cmd_buffer *cmd_buffer);
788
789 void anv_surface_view_destroy(struct anv_device *device,
790 struct anv_object *obj, VkObjectType obj_type);
791
/* A VkSampler: the packed SAMPLER_STATE dwords. */
struct anv_sampler {
   uint32_t state[4];
};

/* Depth/stencil attachment view: offsets and strides of the depth and
 * (separate) stencil surfaces within bo. */
struct anv_depth_stencil_view {
   struct anv_bo *                              bo;

   uint32_t                                     depth_offset;
   uint32_t                                     depth_stride;
   uint32_t                                     depth_format;

   uint32_t                                     stencil_offset;
   uint32_t                                     stencil_stride;
};
806
/* A VkFramebuffer: its attachments, dimensions, and a viewport state
 * object covering the full framebuffer for meta clears. */
struct anv_framebuffer {
   struct anv_object                            base;
   uint32_t                                     color_attachment_count;
   const struct anv_surface_view *              color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view *        depth_stencil;

   uint32_t                                     sample_count;
   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   /* Viewport for clears */
   VkDynamicVpState                             vp_state;
};

/* Per-attachment load-op and clear color for a render pass. */
struct anv_render_pass_layer {
   VkAttachmentLoadOp                           color_load_op;
   VkClearColor                                 clear_color;
};

/* A VkRenderPass: render area plus per-attachment layers as trailing
 * variable-length data. */
struct anv_render_pass {
   VkRect                                       render_area;

   uint32_t                                     num_clear_layers;
   uint32_t                                     num_layers;
   struct anv_render_pass_layer                 layers[0];
};
834
835 void anv_device_init_meta(struct anv_device *device);
836 void anv_device_finish_meta(struct anv_device *device);
837
838 void
839 anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
840 struct anv_render_pass *pass);
841
842 void *
843 anv_lookup_entrypoint(const char *name);
844
845 #ifdef __cplusplus
846 }
847 #endif