vk/queue: Embed the queue in and allocate it with the device
[mesa.git] / src / vulkan / private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #include "brw_device_info.h"
34 #include "util/macros.h"
35
36 #define VK_PROTOTYPES
37 #include <vulkan/vulkan.h>
38 #include <vulkan/vulkan_intel.h>
39 #include <vulkan/vk_wsi_lunarg.h>
40
41 #include "entrypoints.h"
42
43 #include "brw_context.h"
44
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48
/* Round v up to the next multiple of a.  Valid only when a is a nonzero
 * power of two (the usual case for GPU alignments); the mask trick does
 * not generalize to other alignments. */
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
54
/* Signed variant of ALIGN_U32: round v up (toward +infinity) to the next
 * multiple of a.  Requires a to be a positive power of two; works for
 * negative v thanks to two's-complement masking. */
static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   const int32_t mask = a - 1;
   return (v + mask) & ~mask;
}
60
/* Iterate b over the set bit positions (lowest first) of dword.
 * b receives the zero-based bit index on each iteration.
 *
 * The clearing shift uses 1u: with the original signed `1 << (b)`,
 * clearing bit 31 left-shifted into the sign bit, which is undefined
 * behavior in C.  Unsigned shift is well defined for all b in 0..31.
 */
#define for_each_bit(b, dword)                                  \
   for (uint32_t __dword = (dword);                             \
        (b) = __builtin_ffs(__dword) - 1, __dword;              \
        __dword &= ~(1u << (b)))

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1
68
/* Mirrors the two leading members shared by all extensible Vulkan input
 * structures (sType + pNext), so an info-struct chain can be walked
 * generically without knowing the concrete struct type. */
69 struct anv_common {
70 VkStructureType sType;
71 const void* pNext;
72 };
73
74 /* Whenever we generate an error, pass it through this function. Useful for
75 * debugging, where we can break on it. Only call at error site, not when
76 * propagating errors. Might be useful to plug in a stack trace here.
77 */
78
79 static inline VkResult
80 vk_error(VkResult error)
81 {
82 #ifdef DEBUG
83 fprintf(stderr, "vk_error: %x\n", error);
84 #endif
85
86 return error;
87 }
88
/* Backing implementation for anv_finishme(); logs file/line plus a
 * printf-style message. */
void __anv_finishme(const char *file, int line, const char *format, ...);

/**
 * Print a FINISHME message, including its source location.
 *
 * Note: no trailing semicolon in the expansion.  The original baked one
 * in, which made every call expand to two statements and broke use in an
 * unbraced if/else; callers (including the stub macros below) already
 * supply their own semicolon.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)

/* Log a stub warning and return value v from the calling function. */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

/* Log a stub warning and return from the calling (void) function.  The
 * parameter is ignored; it exists only so existing stub(...) call sites
 * keep compiling unchanged. */
#define stub(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
108
/**
 * A dynamically growable, circular buffer.  Elements are appended at head
 * and consumed from tail.  Both counters are free-running uint32_t byte
 * offsets; they are reduced modulo size only when the backing array is
 * actually indexed.  The number of queued bytes is therefore always
 * head - tail, even across wraparound.
 */
struct anv_vector {
   uint32_t head;
   uint32_t tail;
   uint32_t element_size;
   uint32_t size;
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

/* Number of elements currently queued. */
static inline int
anv_vector_length(struct anv_vector *queue)
{
   uint32_t bytes_used = queue->head - queue->tail;
   return bytes_used / queue->element_size;
}

/* Release the backing storage; the anv_vector itself is caller-owned. */
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}
140
/* Iterate elem over every queued element, oldest first.  The static_assert
 * rejects anything that is not a struct anv_vector pointer.
 * NOTE(review): the index mask (size - 1) assumes size is a power of two
 * -- confirm anv_vector_init enforces that.  Also, the comma operator
 * assigns elem (to one past the last element) before the bound check on
 * the final iteration; harmless as long as callers never dereference elem
 * after the loop exits. */
141 #define anv_vector_foreach(elem, queue) \
142 static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
143 for (uint32_t __anv_vector_offset = (queue)->tail; \
144 elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
145 __anv_vector_offset += (queue)->element_size)
146
/* A GEM buffer object as tracked by the driver: the kernel handle, the
 * slot it occupied in the most recent execbuf object list (index --
 * presumably; confirm against anv_cmd_buffer), and its presumed GPU
 * offset and size. */
147 struct anv_bo {
148 int gem_handle;
149 uint32_t index;
150 uint64_t offset;
151 uint64_t size;
152
153 /* This field is here for the benefit of the aub dumper. It can (and for
154 * userptr bos it must) be set to the cpu map of the buffer. Destroying
155 * the bo won't clean up the mmap, it's still the responsibility of the bo
156 * user to do that. */
157 void *map;
158 };
159
160 /* Represents a lock-free linked list of "free" things. This is used by
161 * both the block pool and the state pools. Unfortunately, in order to
162 * solve the ABA problem, we can't use a single uint32_t head.
163 */
164 union anv_free_list {
165 struct {
166 uint32_t offset;
167
168 /* A simple count that is incremented every time the head changes. */
169 uint32_t count;
170 };
171 uint64_t u64;
172 };
173
/* Empty list sentinel: offset 1 is never a valid block offset. */
174 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
175
/* Pool of fixed-size blocks sub-allocated from one growable BO. */
176 struct anv_block_pool {
177 struct anv_device *device;
178
179 struct anv_bo bo;
180 void *map;
181 int fd;
182 uint32_t size;
183
184 /**
185 * Array of mmaps and gem handles owned by the block pool, reclaimed when
186 * the block pool is destroyed.
187 */
188 struct anv_vector mmap_cleanups;
189
190 uint32_t block_size;
191
192 uint32_t next_block;
193 union anv_free_list free_list;
194 };
195
/* next/end cursor pair; the overlaid u64 presumably allows updating both
 * with one atomic operation -- confirm in the allocator implementation. */
196 struct anv_block_state {
197 union {
198 struct {
199 uint32_t next;
200 uint32_t end;
201 };
202 uint64_t u64;
203 };
204 };
205
/* A sub-allocation handed out by a state pool/stream: offset into the
 * pool's BO, the allocated size, and a CPU pointer to the same bytes. */
206 struct anv_state {
207 uint32_t offset;
208 uint32_t alloc_size;
209 void *map;
210 };
211
212 struct anv_fixed_size_state_pool {
213 size_t state_size;
214 union anv_free_list free_list;
215 struct anv_block_state block;
216 };
217
218 #define ANV_MIN_STATE_SIZE_LOG2 6
219 #define ANV_MAX_STATE_SIZE_LOG2 10
220
/* NOTE(review): MAX - MIN yields 4 buckets for the size range 2^6..2^10;
 * if sizes up to and including 2^10 are allocated from buckets, this
 * looks off by one (would need MAX - MIN + 1).  Verify against
 * anv_state_pool_alloc's bucket-index computation. */
221 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)
222
223 struct anv_state_pool {
224 struct anv_block_pool *block_pool;
225 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
226 };
227
/* Linear (non-freeing) allocator that carves states out of pool blocks. */
228 struct anv_state_stream {
229 struct anv_block_pool *block_pool;
230 uint32_t next;
231 uint32_t current_block;
232 uint32_t end;
233 };
234
/* Allocator entry points; implementations live in the allocator .c file. */
235 void anv_block_pool_init(struct anv_block_pool *pool,
236 struct anv_device *device, uint32_t block_size);
237 void anv_block_pool_finish(struct anv_block_pool *pool);
238 uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
239 void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
240 void anv_state_pool_init(struct anv_state_pool *pool,
241 struct anv_block_pool *block_pool);
242 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
243 size_t state_size, size_t alignment);
244 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
245 void anv_state_stream_init(struct anv_state_stream *stream,
246 struct anv_block_pool *block_pool);
247 void anv_state_stream_finish(struct anv_state_stream *stream);
248 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
249 uint32_t size, uint32_t alignment);
250
251 /**
252 * Implements a pool of re-usable BOs. The interface is identical to that
253 * of block_pool except that each block is its own BO.
254 */
255 struct anv_bo_pool {
256 struct anv_device *device;
257
258 uint32_t bo_size;
259
260 void *free_list;
261 };
262
263 void anv_bo_pool_init(struct anv_bo_pool *pool,
264 struct anv_device *device, uint32_t block_size);
265 void anv_bo_pool_finish(struct anv_bo_pool *pool);
266 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
267 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
268
269 struct anv_object;
270 struct anv_device;
271
/* Destructor invoked when a generic API object is destroyed. */
272 typedef void (*anv_object_destructor_cb)(struct anv_device *,
273 struct anv_object *,
274 VkObjectType);
275
/* Base header embedded in destructible API objects; the destructor is
 * dispatched through this field. */
276 struct anv_object {
277 anv_object_destructor_cb destructor;
278 };
279
280 struct anv_physical_device {
281 struct anv_instance * instance;
282 uint32_t chipset_id;
283 bool no_hw;
284 const char * path;
285 const char * name;
286 const struct brw_device_info * info;
287 };
288
/* Instance object; carries the application-supplied allocator callbacks
 * used by anv_device_alloc/anv_device_free. */
289 struct anv_instance {
290 void * pAllocUserData;
291 PFN_vkAllocFunction pfnAlloc;
292 PFN_vkFreeFunction pfnFree;
293 uint32_t apiVersion;
294 uint32_t physicalDeviceCount;
295 struct anv_physical_device physicalDevice;
296 };
297
/* Pipelines and dynamic-state objects used internally by the meta
 * operations (clears and blits); see anv_device_init_meta. */
298 struct anv_meta_state {
299 struct {
300 VkPipeline pipeline;
301 } clear;
302
303 struct {
304 VkPipeline pipeline;
305 VkPipelineLayout pipeline_layout;
306 VkDescriptorSetLayout ds_layout;
307 } blit;
308
309 struct {
310 VkDynamicRsState rs_state;
311 VkDynamicCbState cb_state;
312 VkDynamicDsState ds_state;
313 } shared;
314 };
315
/* Embedded in (and allocated together with) anv_device; see the device
 * struct's queue member. */
316 struct anv_queue {
317 struct anv_device * device;
318
319 struct anv_state_pool * pool;
320
321 /**
322 * Serial number of the most recently completed batch executed on the
323 * engine.
324 */
325 struct anv_state completed_serial;
326
327 /**
328 * The next batch submitted to the engine will be assigned this serial
329 * number.
330 */
331 uint32_t next_serial;
332
333 uint32_t last_collected_serial;
334 };
335
/* The logical device: DRM fd and context, the memory pools every other
 * object allocates from, meta state, and the single embedded queue. */
336 struct anv_device {
337 struct anv_instance * instance;
338 uint32_t chipset_id;
339 struct brw_device_info info;
340 int context_id;
341 int fd;
342 bool no_hw;
343 bool dump_aub;
344
345 struct anv_bo_pool batch_bo_pool;
346
347 struct anv_block_pool dynamic_state_block_pool;
348 struct anv_state_pool dynamic_state_pool;
349
350 struct anv_block_pool instruction_block_pool;
351 struct anv_block_pool surface_state_block_pool;
352 struct anv_state_pool surface_state_pool;
353
354 struct anv_meta_state meta_state;
355
/* Pre-baked border-color states shared by samplers. */
356 struct anv_state float_border_colors;
357 struct anv_state uint32_border_colors;
358
/* The queue is embedded in and allocated with the device (not separately
 * heap-allocated). */
359 struct anv_queue queue;
360
361 struct anv_compiler * compiler;
362 struct anv_aub_writer * aub_writer;
363 pthread_mutex_t mutex;
364 };
365
366 void *
367 anv_device_alloc(struct anv_device * device,
368 size_t size,
369 size_t alignment,
370 VkSystemAllocType allocType);
371
372 void
373 anv_device_free(struct anv_device * device,
374 void * mem);
375
376 void* anv_gem_mmap(struct anv_device *device,
377 uint32_t gem_handle, uint64_t offset, uint64_t size);
378 void anv_gem_munmap(void *p, uint64_t size);
379 uint32_t anv_gem_create(struct anv_device *device, size_t size);
380 void anv_gem_close(struct anv_device *device, int gem_handle);
381 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
382 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
383 int anv_gem_execbuffer(struct anv_device *device,
384 struct drm_i915_gem_execbuffer2 *execbuf);
385 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
386 uint32_t stride, uint32_t tiling);
387 int anv_gem_create_context(struct anv_device *device);
388 int anv_gem_destroy_context(struct anv_device *device, int context);
389 int anv_gem_get_param(int fd, uint32_t param);
390 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
391 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
392 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
393 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
394
395 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
396
/* Growable array of kernel relocation entries plus, in parallel, the BO
 * each entry targets. */
397 struct anv_reloc_list {
398 size_t num_relocs;
399 size_t array_length;
400 struct drm_i915_gem_relocation_entry * relocs;
401 struct anv_bo ** reloc_bos;
402 };
403
404 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
405 struct anv_device *device);
406 void anv_reloc_list_finish(struct anv_reloc_list *list,
407 struct anv_device *device);
408
/* One BO's worth of batch, chained (via prev_batch_bo) to the BOs that
 * came before it in the same command buffer. */
409 struct anv_batch_bo {
410 struct anv_bo bo;
411
412 /* Bytes actually consumed in this batch BO */
413 size_t length;
414
415 /* These offsets reference the per-batch reloc list */
416 size_t first_reloc;
417 size_t num_relocs;
418
419 struct anv_batch_bo * prev_batch_bo;
420 };
421
/* CPU-side write cursor for command emission: commands are packed into
 * [start, end) at next, with relocations accumulated in relocs. */
422 struct anv_batch {
423 struct anv_device * device;
424
425 void * start;
426 void * end;
427 void * next;
428
429 struct anv_reloc_list relocs;
430
431 /* This callback is called (with the associated user data) in the event
432 * that the batch runs out of space.
433 */
434 VkResult (*extend_cb)(struct anv_batch *, void *);
435 void * user_data;
436 };
437
438 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
439 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
440 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
441 void *location, struct anv_bo *bo, uint32_t offset);
442
/* A (bo, offset) pair; bo == NULL means the offset is absolute. */
443 struct anv_address {
444 struct anv_bo *bo;
445 uint32_t offset;
446 };
447
448 #define __gen_address_type struct anv_address
449 #define __gen_user_data struct anv_batch
450
451 static inline uint64_t
452 __gen_combine_address(struct anv_batch *batch, void *location,
453 const struct anv_address address, uint32_t delta)
454 {
455 if (address.bo == NULL) {
456 return delta;
457 } else {
458 assert(batch->start <= location && location < batch->end);
459
460 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
461 }
462 }
463
464 #include "gen7_pack.h"
465 #include "gen75_pack.h"
466 #undef GEN8_3DSTATE_MULTISAMPLE
467 #include "gen8_pack.h"
468
/* Emit a single packed command into the batch.  cmd is a struct name from
 * the generated gen*_pack.h headers; designated-initializer fields follow
 * in __VA_ARGS__.  The batch argument is parenthesized so expression
 * arguments expand safely (cmd and __VA_ARGS__ cannot be). */
#define anv_batch_emit(batch, cmd, ...) do { \
      struct cmd __template = { \
         cmd ## _header, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords((batch), cmd ## _length); \
      cmd ## _pack((batch), __dst, &__template); \
   } while (0)

/* Emit a variable-length command of n dwords; the statement expression
 * evaluates to a pointer to the packed destination so the caller can
 * append trailing dwords.  (n) is parenthesized: the original unguarded
 * `n - cmd ## _length_bias` mis-bound for arguments like `a | b`. */
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      struct cmd __template = { \
         cmd ## _header, \
         .DwordLength = (n) - cmd ## _length_bias, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords((batch), (n)); \
      cmd ## _pack((batch), __dst, &__template); \
      __dst; \
   })
488
/* Emit the bitwise OR of two equal-length pre-packed dword arrays.  Both
 * arguments must be actual arrays (ARRAY_SIZE would silently misbehave on
 * pointers); used to merge pipeline-baked and dynamic-state dwords. */
489 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
490 do { \
491 uint32_t *dw; \
492 \
493 assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
494 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
495 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
496 dw[i] = (dwords0)[i] | (dwords1)[i]; \
497 } while (0)
498
/* Default gen8 memory object control state: LLC/eLLC write-back, target
 * cache deferred to PAT.  Initializer list for the generated MOCS struct. */
499 #define GEN8_MOCS { \
500 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
501 .TargetCache = L3DefertoPATforLLCeLLCselection, \
502 .AgeforQUADLRU = 0 \
503 }
504
/* A VkDeviceMemory allocation: one BO, plus its CPU mapping when mapped. */
505 struct anv_device_memory {
506 struct anv_bo bo;
507 VkDeviceSize map_size;
508 void * map;
509 };
510
/* Viewport dynamic state: sub-allocated hardware state for the SF_CLIP
 * and CC viewports and the scissor rect. */
511 struct anv_dynamic_vp_state {
512 struct anv_object base;
513 struct anv_state sf_clip_vp;
514 struct anv_state cc_vp;
515 struct anv_state scissor;
516 };
517
/* Raster dynamic state, pre-packed for later merging with the pipeline's
 * dwords (see anv_batch_emit_merge). */
518 struct anv_dynamic_rs_state {
519 uint32_t state_sf[GEN8_3DSTATE_SF_length];
520 uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
521 };
522
523 struct anv_dynamic_ds_state {
524 uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
525 uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
526 };
527
528 struct anv_dynamic_cb_state {
529 uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
530
531 };
532
/* Location of one binding within a stage's surface/sampler tables;
 * dynamic_slot < 0 presumably means "not a dynamic buffer" -- confirm in
 * the descriptor-set code. */
533 struct anv_descriptor_slot {
534 int8_t dynamic_slot;
535 uint8_t index;
536 };
537
538 struct anv_descriptor_set_layout {
539 struct {
540 uint32_t surface_count;
541 struct anv_descriptor_slot *surface_start;
542 uint32_t sampler_count;
543 struct anv_descriptor_slot *sampler_start;
544 } stage[VK_NUM_SHADER_STAGE];
545
546 uint32_t count;
547 uint32_t num_dynamic_buffers;
548 uint32_t shader_stages;
/* Trailing variable-length data; [0] is the pre-C99 GNU zero-length-array
 * idiom (a C99 flexible array member would be entries[]). */
549 struct anv_descriptor_slot entries[0];
550 };
551
552 struct anv_descriptor {
553 struct anv_sampler *sampler;
554 struct anv_surface_view *view;
555 };
556
557 struct anv_descriptor_set {
558 struct anv_descriptor descriptors[0];
559 };
560
/* Fixed driver limits: vertex buffers, bound descriptor sets, render
 * targets. */
561 #define MAX_VBS 32
562 #define MAX_SETS 8
563 #define MAX_RTS 8
564
/* Per-pipeline mapping from (set, stage) to where that set's surfaces and
 * samplers start in the stage's flattened binding tables. */
565 struct anv_pipeline_layout {
566 struct {
567 struct anv_descriptor_set_layout *layout;
568 uint32_t surface_start[VK_NUM_SHADER_STAGE];
569 uint32_t sampler_start[VK_NUM_SHADER_STAGE];
570 } set[MAX_SETS];
571
572 uint32_t num_sets;
573
574 struct {
575 uint32_t surface_count;
576 uint32_t sampler_count;
577 } stage[VK_NUM_SHADER_STAGE];
578 };
579
580 struct anv_buffer {
581 struct anv_device * device;
582 VkDeviceSize size;
583
584 /* Set when bound */
585 struct anv_bo * bo;
586 VkDeviceSize offset;
587 };
588
/* Dirty bits for anv_cmd_buffer::dirty.  NOTE(review): bit 1 is skipped
 * -- possibly reserved or left over from a removed state; confirm before
 * reusing it. */
589 #define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
590 #define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
591 #define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
592 #define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)
593
594 struct anv_vertex_binding {
595 struct anv_buffer * buffer;
596 VkDeviceSize offset;
597 };
598
599 struct anv_descriptor_set_binding {
600 struct anv_descriptor_set * set;
601 uint32_t dynamic_offsets[128];
602 };
603
/* A command buffer: the execbuf bookkeeping handed to the kernel, the
 * chain of batch BOs being filled, and the CPU-side state tracked while
 * recording (dirty bits, bound pipeline/framebuffer/dynamic state). */
604 struct anv_cmd_buffer {
605 struct anv_object base;
606 struct anv_device * device;
607
/* Kernel submission state: the execbuf2 ioctl argument plus parallel
 * arrays of exec objects and the BOs they describe. */
608 struct drm_i915_gem_execbuffer2 execbuf;
609 struct drm_i915_gem_exec_object2 * exec2_objects;
610 struct anv_bo ** exec2_bos;
611 uint32_t exec2_array_length;
612 bool need_reloc;
613 uint32_t serial;
614
615 uint32_t bo_count;
616 struct anv_batch batch;
617 struct anv_batch_bo * last_batch_bo;
618 struct anv_batch_bo * surface_batch_bo;
619 uint32_t surface_next;
620 struct anv_reloc_list surface_relocs;
621 struct anv_state_stream surface_state_stream;
622 struct anv_state_stream dynamic_state_stream;
623
624 /* State required while building cmd buffer */
625 uint32_t vb_dirty;
626 uint32_t dirty;
627 uint32_t descriptors_dirty;
628 struct anv_pipeline * pipeline;
629 struct anv_framebuffer * framebuffer;
630 struct anv_dynamic_rs_state * rs_state;
631 struct anv_dynamic_ds_state * ds_state;
632 struct anv_dynamic_vp_state * vp_state;
633 struct anv_dynamic_cb_state * cb_state;
634 struct anv_vertex_binding vertex_bindings[MAX_VBS];
635 struct anv_descriptor_set_binding descriptors[MAX_SETS];
636 };
637
638 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
639 void anv_aub_writer_destroy(struct anv_aub_writer *writer);
640
/* A fence: a tiny batch of its own whose completion (tracked via
 * anv_gem_wait on bo) signals the fence. */
641 struct anv_fence {
642 struct anv_object base;
643 struct anv_bo bo;
644 struct drm_i915_gem_execbuffer2 execbuf;
645 struct drm_i915_gem_exec_object2 exec2_objects[1];
646 bool ready;
647 };
648
/* Shader module: size-prefixed opaque blob (trailing zero-length array). */
649 struct anv_shader {
650 uint32_t size;
651 char data[0];
652 };
653
/* A compiled graphics pipeline: per-stage shaders and brw prog_data from
 * the compiler, pre-packed hardware dwords, scratch BOs, and the URB
 * partitioning programmed at bind time. */
654 struct anv_pipeline {
655 struct anv_object base;
656 struct anv_device * device;
/* Pipeline-setup commands are pre-recorded into this self-contained
 * batch (backed by batch_data) and replayed at bind. */
657 struct anv_batch batch;
658 uint32_t batch_data[256];
659 struct anv_shader * shaders[VK_NUM_SHADER_STAGE];
660 struct anv_pipeline_layout * layout;
661 bool use_repclear;
662
663 struct brw_vs_prog_data vs_prog_data;
664 struct brw_wm_prog_data wm_prog_data;
665 struct brw_gs_prog_data gs_prog_data;
666 struct brw_stage_prog_data * prog_data[VK_NUM_SHADER_STAGE];
667 struct {
668 uint32_t vs_start;
669 uint32_t vs_size;
670 uint32_t nr_vs_entries;
671 uint32_t gs_start;
672 uint32_t gs_size;
673 uint32_t nr_gs_entries;
674 } urb;
675
676 struct anv_bo vs_scratch_bo;
677 struct anv_bo ps_scratch_bo;
678 struct anv_bo gs_scratch_bo;
679
680 uint32_t active_stages;
681 struct anv_state_stream program_stream;
682 struct anv_state blend_state;
/* Kernel entry offsets into the instruction pool for each compiled
 * variant (NO_KERNEL when absent). */
683 uint32_t vs_simd8;
684 uint32_t ps_simd8;
685 uint32_t ps_simd16;
686 uint32_t gs_vec4;
687 uint32_t gs_vertex_count;
688
689 uint32_t vb_used;
690 uint32_t binding_stride[MAX_VBS];
691
/* Pre-packed dwords merged with dynamic rs/ds state at draw time. */
692 uint32_t state_sf[GEN8_3DSTATE_SF_length];
693 uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
694 uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
695 };
696
/* Extra, non-Vulkan pipeline options used by the meta paths. */
697 struct anv_pipeline_create_info {
698 bool use_repclear;
699 bool disable_viewport;
700 bool disable_scissor;
701 bool disable_vs;
702 bool use_rectlist;
703 };
704
705 VkResult
706 anv_pipeline_create(VkDevice device,
707 const VkGraphicsPipelineCreateInfo *pCreateInfo,
708 const struct anv_pipeline_create_info *extra,
709 VkPipeline *pPipeline);
710
711 struct anv_compiler *anv_compiler_create(struct anv_device *device);
712 void anv_compiler_destroy(struct anv_compiler *compiler);
713 int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
714 void anv_compiler_free(struct anv_pipeline *pipeline);
715
/* One row of the VkFormat -> hardware surface format table. */
716 struct anv_format {
717 const char * name;
718 uint16_t format;
719 uint8_t cpp;
720 uint8_t channels;
721 bool has_stencil;
722 };
723
724 const struct anv_format *
725 anv_format_for_vk_format(VkFormat format);
726
/* A VkImage: surface layout (tiling, stride, alignment), optional stencil
 * aux layout, and -- once bound -- its backing BO and offset. */
727 struct anv_image {
728 VkImageType type;
729 VkExtent3D extent;
730 VkFormat format;
731 uint32_t tile_mode;
732 VkDeviceSize size;
733 uint32_t alignment;
734 uint32_t stride;
735
736 uint32_t stencil_offset;
737 uint32_t stencil_stride;
738
739 /* Set when bound */
740 struct anv_bo * bo;
741 VkDeviceSize offset;
742
/* Non-NULL when the image belongs to a WSI swap chain. */
743 struct anv_swap_chain * swap_chain;
744
745 /**
746 * \name Alignment of miptree images, in units of pixels.
747 *
748 * These fields contain the actual alignment values, not the values the
749 * hardware expects. For example, if h_align is 4, then program the hardware
750 * with HALIGN_4.
751 *
752 * \see RENDER_SURFACE_STATE.SurfaceHorizontalAlignment
753 * \see RENDER_SURFACE_STATE.SurfaceVerticalAlignment
754 * \{
755 */
756 uint8_t h_align;
757 uint8_t v_align;
758 /** \} */
759 };
760
/* Shared representation for image views and color-attachment views: a
 * baked RENDER_SURFACE_STATE plus the range of the BO it describes. */
761 struct anv_surface_view {
762 struct anv_object base;
763
764 struct anv_state surface_state;
765 struct anv_bo * bo;
766 uint32_t offset;
767 uint32_t range;
768 VkExtent3D extent;
769 VkFormat format;
770 };
771
772 struct anv_image_create_info {
773 uint32_t tile_mode;
774 };
775
776 VkResult anv_image_create(VkDevice _device,
777 const VkImageCreateInfo *pCreateInfo,
778 const struct anv_image_create_info *extra,
779 VkImage *pImage);
780
/* cmd_buffer may be NULL presumably to allocate surface state from the
 * device pool instead of the command buffer's stream -- confirm in the
 * implementation. */
781 void anv_image_view_init(struct anv_surface_view *view,
782 struct anv_device *device,
783 const VkImageViewCreateInfo* pCreateInfo,
784 struct anv_cmd_buffer *cmd_buffer);
785
786 void anv_color_attachment_view_init(struct anv_surface_view *view,
787 struct anv_device *device,
788 const VkColorAttachmentViewCreateInfo* pCreateInfo,
789 struct anv_cmd_buffer *cmd_buffer);
790
791 void anv_surface_view_destroy(struct anv_device *device,
792 struct anv_object *obj, VkObjectType obj_type);
793
/* Packed SAMPLER_STATE dwords, copied directly into the sampler table. */
794 struct anv_sampler {
795 uint32_t state[4];
796 };
797
798 struct anv_depth_stencil_view {
799 struct anv_bo * bo;
800
801 uint32_t depth_offset;
802 uint32_t depth_stride;
803 uint32_t depth_format;
804
805 uint32_t stencil_offset;
806 uint32_t stencil_stride;
807 };
808
809 struct anv_framebuffer {
810 struct anv_object base;
811 uint32_t color_attachment_count;
812 const struct anv_surface_view * color_attachments[MAX_RTS];
813 const struct anv_depth_stencil_view * depth_stencil;
814
815 uint32_t sample_count;
816 uint32_t width;
817 uint32_t height;
818 uint32_t layers;
819
820 /* Viewport for clears */
821 VkDynamicVpState vp_state;
822 };
823
/* Per-attachment clear info for a render pass. */
824 struct anv_render_pass_layer {
825 VkAttachmentLoadOp color_load_op;
826 VkClearColor clear_color;
827 };
828
829 struct anv_render_pass {
830 VkRect render_area;
831
832 uint32_t num_clear_layers;
833 uint32_t num_layers;
834 struct anv_render_pass_layer layers[0];
835 };
836
/* Create/destroy the internal meta pipelines and dynamic state. */
837 void anv_device_init_meta(struct anv_device *device);
838 void anv_device_finish_meta(struct anv_device *device);
839
840 void
841 anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
842 struct anv_render_pass *pass);
843
/* Resolve an entrypoint name to its function pointer (see entrypoints.h). */
844 void *
845 anv_lookup_entrypoint(const char *name);
846
847 #ifdef __cplusplus
848 }
849 #endif