7dbf288022ebdee34fe13b6c2aa5896b2e5c9875
[mesa.git] / src / vulkan / private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #include "brw_device_info.h"
34 #include "util/macros.h"
35
36 #define VK_PROTOTYPES
37 #include <vulkan/vulkan.h>
38 #include <vulkan/vulkan_intel.h>
39 #include <vulkan/vk_wsi_lunarg.h>
40
41 #include "entrypoints.h"
42
43 #include "brw_context.h"
44
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48
/* Round v up to the next multiple of a.  The alignment a must be a
 * power of two so that rounding can be done with a bitmask.
 */
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
54
/* Round v up to the next multiple of a (signed variant).  The alignment
 * a must be a positive power of two.
 *
 * The arithmetic is done in uint32_t: signed overflow is undefined
 * behavior in C, and `v + a - 1` could overflow for v near INT32_MAX.
 * Unsigned wraparound is well-defined and the mask brings the result
 * back into range, so this matches the naive formula on every input
 * where the naive formula is defined.
 */
static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   uint32_t mask = (uint32_t)a - 1;

   return (int32_t)(((uint32_t)v + mask) & ~mask);
}
60
/* Iterate b over the indices of the set bits of dword, lowest bit first.
 * dword is copied into a loop-private variable, so the argument is
 * evaluated exactly once; each visited bit is then cleared from the copy.
 *
 * The clearing mask uses 1u: left-shifting (signed) 1 by 31 is undefined
 * behavior, so the shift must be performed on an unsigned operand for
 * bit 31 to be handled correctly.
 */
#define for_each_bit(b, dword)                                  \
   for (uint32_t __dword = (dword);                             \
        (b) = __builtin_ffs(__dword) - 1, __dword;              \
        __dword &= ~(1u << (b)))

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1
68
/* Common prefix shared by the Vulkan Vk*Info structures: an sType tag
 * followed by a pNext extension pointer.  A pointer to any such struct
 * can be read through this type to walk a pNext chain generically.
 */
struct anv_common {
    VkStructureType sType;
    const void* pNext;
};
73
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

static inline VkResult
vk_error(VkResult error)
{
#ifdef DEBUG
   /* Log only in debug builds; release builds pass the code through
    * silently. */
   fprintf(stderr, "vk_error: %x\n", error);
#endif

   return error;
}
88
/* Out-of-line worker for anv_finishme(); implemented elsewhere in the
 * driver. */
void __anv_finishme(const char *file, int line, const char *format, ...);

/**
 * Print a FINISHME message, including its source location.
 *
 * Note: no trailing semicolon in the expansion.  The caller writes the
 * semicolon, so the previous `...);` form expanded to two statements and
 * broke `if (x) anv_finishme(...); else ...`.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
96
/* Mark the current function as an unimplemented stub, log it via
 * anv_finishme, and return v.  Wrapped in do/while(0) so it behaves as
 * a single statement.
 */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

/* Same as stub_return but for void functions.  The v parameter is
 * accepted but never expanded (callers invoke it as stub()).
 */
#define stub(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
108
/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_vector {
   uint32_t head;          /* free-running byte offset of the next add */
   uint32_t tail;          /* free-running byte offset of the next remove */
   uint32_t element_size;  /* size in bytes of one element */
   uint32_t size;          /* capacity of data in bytes; presumably a
                            * power of two (foreach masks with size - 1)
                            * — confirm in anv_vector_init */
   void *data;             /* backing storage, owned by the vector */
};

/* Returns non-zero on success; element_size and size are in bytes. */
int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
/* Reserve space for one element at head; returns a pointer to write into. */
void *anv_vector_add(struct anv_vector *queue);
/* Pop one element from tail; see implementation for empty-queue behavior. */
void *anv_vector_remove(struct anv_vector *queue);
128
129 static inline int
130 anv_vector_length(struct anv_vector *queue)
131 {
132 return (queue->head - queue->tail) / queue->element_size;
133 }
134
/* Release the vector's backing storage.  Only data is freed; head/tail
 * are left untouched, so the vector must go back through
 * anv_vector_init() before reuse.
 */
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}
140
/* Iterate elem over the queued elements from tail to head.  The
 * static_assert pins the queue argument to a struct anv_vector *.  The
 * offset is masked with (size - 1) on each access, which assumes size is
 * a power of two.  Note the macro expands to two statements
 * (static_assert + for), so it cannot be used as the sole body of an
 * unbraced if/else.
 */
#define anv_vector_foreach(elem, queue)                                  \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail;                                \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
146
/* A GEM buffer object plus the bookkeeping the driver keeps alongside
 * the kernel handle. */
struct anv_bo {
   int gem_handle;   /* handle returned by the i915 GEM ioctls */
   uint32_t index;   /* slot in the execbuf object list */
   uint64_t offset;  /* presumed GPU offset, as last seen by the kernel */
   uint64_t size;    /* size in bytes */

   /* This field is here for the benefit of the aub dumper. It can (and for
    * userptr bos it must) be set to the cpu map of the buffer. Destroying
    * the bo won't clean up the mmap, it's still the responsibility of the bo
    * user to do that. */
   void *map;
};
159
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      /* Offset of the current head element; doubles as the "next"
       * pointer encoding for list links. */
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   /* Both halves viewed as one 64-bit value so the head can be swapped
    * with a single atomic compare-and-exchange. */
   uint64_t u64;
};

/* Empty list sentinel: offset 1 (an invalid offset), generation 0. */
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
175
/* A pool that hands out fixed-size blocks carved from a single BO. */
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;    /* backing buffer object */
   void *map;           /* CPU mapping of bo */
   int fd;              /* fd backing the mapping — see pool implementation */
   uint32_t size;       /* current size of the pool in bytes */

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size; /* size of each block handed out */

   uint32_t next_block;            /* offset of the next never-used block */
   union anv_free_list free_list;  /* lock-free list of returned blocks */
};
195
/* Allocation cursor within one block: next is the offset of the next
 * allocation, end is the end of the current block.  The u64 view allows
 * both fields to be updated with a single 64-bit atomic. */
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};
205
/* A sub-allocation from a state pool or stream: its offset within the
 * backing pool, the size that was allocated, and a CPU pointer to it. */
struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};
211
/* One size class of a state pool: a free list of returned states plus a
 * bump cursor into the current block. */
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

/* State allocations are bucketed by power-of-two size, from
 * 2^ANV_MIN_STATE_SIZE_LOG2 up to 2^ANV_MAX_STATE_SIZE_LOG2 bytes. */
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

/* A size-bucketed sub-allocator layered on top of a block pool. */
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
227
/* A linear (bump) allocator over blocks from a block pool; states are
 * freed all at once via anv_state_stream_finish. */
struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;           /* offset of the next allocation */
   uint32_t current_block;  /* offset of the block being filled */
   uint32_t end;            /* end of the current block */
};
234
/* Block pool: fixed-size block allocate/free. */
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
/* Initialize a pool that draws a fixed number of blocks from a master
 * pool instead of owning its own BO. */
void anv_block_pool_init_slave(struct anv_block_pool *pool,
                               struct anv_block_pool *master_pool,
                               uint32_t num_blocks);
void anv_block_pool_finish(struct anv_block_pool *pool);
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
/* State pool: size-bucketed sub-allocation on top of a block pool. */
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
/* State stream: linear allocation, freed in bulk by _finish. */
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
253
struct anv_object;
struct anv_device;

/* Destructor callback invoked when a driver object is destroyed through
 * the generic object-destroy path. */
typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

/* Base struct embedded at the start of destructible driver objects;
 * holds only the per-object destructor. */
struct anv_object {
   anv_object_destructor_cb destructor;
};
264
/* One enumerated GPU: its DRM device path, PCI chipset id, and the
 * brw_device_info table describing the hardware generation. */
struct anv_physical_device {
    struct anv_instance * instance;
    uint32_t chipset_id;
    bool no_hw;                          /* true when running without real hardware */
    const char * path;                   /* DRM device node path */
    const char * name;
    const struct brw_device_info * info;
};

/* The Vulkan instance: application allocation callbacks plus the single
 * physical device this driver exposes. */
struct anv_instance {
    void * pAllocUserData;
    PFN_vkAllocFunction pfnAlloc;
    PFN_vkFreeFunction pfnFree;
    uint32_t apiVersion;
    uint32_t physicalDeviceCount;
    struct anv_physical_device physicalDevice;
};
282
/* Pre-built pipeline and dynamic state used by the meta clear path. */
struct anv_clear_state {
   VkPipeline pipeline;
   VkDynamicRsState rs_state;
   VkDynamicCbState cb_state;
};

/* Pre-built pipeline and state used by the meta blit path. */
struct anv_blit_state {
   VkPipeline pipeline;
   VkDynamicRsState rs_state;
   VkDescriptorSetLayout ds_layout;
   VkDynamicCbState cb_state;
};
295
/* The logical device: DRM fd and context, hardware info, and the memory
 * pools all command buffers and pipelines allocate from. */
struct anv_device {
    struct anv_instance * instance;
    uint32_t chipset_id;
    struct brw_device_info info;
    int context_id;             /* i915 hardware context id */
    int fd;                     /* DRM device fd */
    bool no_hw;                 /* skip real submission when set */
    bool dump_aub;              /* write an AUB trace of submissions */

    /* Pools for the various classes of GPU state. */
    struct anv_block_pool dynamic_state_block_pool;
    struct anv_state_pool dynamic_state_pool;

    struct anv_block_pool instruction_block_pool;
    struct anv_block_pool surface_state_block_pool;
    struct anv_block_pool binding_table_block_pool;
    struct anv_state_pool surface_state_pool;

    /* Shared state for the meta (clear/blit) paths. */
    struct anv_clear_state clear_state;
    struct anv_blit_state blit_state;

    struct anv_compiler * compiler;
    struct anv_aub_writer * aub_writer;
    pthread_mutex_t mutex;      /* guards device-wide shared state */
};
320
/* A submission queue and the serial-number bookkeeping used to track
 * batch completion. */
struct anv_queue {
    struct anv_device * device;

    struct anv_state_pool * pool;

    /**
     * Serial number of the most recently completed batch executed on the
     * engine.
     */
    struct anv_state completed_serial;

    /**
     * The next batch submitted to the engine will be assigned this serial
     * number.
     */
    uint32_t next_serial;

    /* Highest serial whose resources have been reclaimed. */
    uint32_t last_collected_serial;
};
340
/* Device-level allocate/free.  NOTE(review): presumably forwards to the
 * instance's pfnAlloc/pfnFree callbacks — confirm in the implementation. */
void *
anv_device_alloc(struct anv_device * device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType);

void
anv_device_free(struct anv_device * device,
                void * mem);
350
351 void* anv_gem_mmap(struct anv_device *device,
352 uint32_t gem_handle, uint64_t offset, uint64_t size);
353 void anv_gem_munmap(void *p, uint64_t size);
354 uint32_t anv_gem_create(struct anv_device *device, size_t size);
355 void anv_gem_close(struct anv_device *device, int gem_handle);
356 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
357 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
358 int anv_gem_execbuffer(struct anv_device *device,
359 struct drm_i915_gem_execbuffer2 *execbuf);
360 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
361 uint32_t stride, uint32_t tiling);
362 int anv_gem_create_context(struct anv_device *device);
363 int anv_gem_destroy_context(struct anv_device *device, int context);
364 int anv_gem_get_param(int fd, uint32_t param);
365 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
366 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
367 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
368 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
369
370 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
371
/* TODO: Remove hardcoded reloc limit. */
#define ANV_BATCH_MAX_RELOCS 256

/* Fixed-capacity list of execbuf relocations plus the BO each one
 * targets; relocs[i] refers to reloc_bos[i]. */
struct anv_reloc_list {
   size_t num_relocs;
   struct drm_i915_gem_relocation_entry relocs[ANV_BATCH_MAX_RELOCS];
   struct anv_bo * reloc_bos[ANV_BATCH_MAX_RELOCS];
};

/* A command batch: the backing BO, a write cursor into its map, and the
 * relocations accumulated while emitting commands. */
struct anv_batch {
   struct anv_bo bo;
   void * next;                      /* next free byte in the batch map */
   struct anv_reloc_list cmd_relocs;
};
386
VkResult anv_batch_init(struct anv_batch *batch, struct anv_device *device);
void anv_batch_finish(struct anv_batch *batch, struct anv_device *device);
void anv_batch_reset(struct anv_batch *batch);
/* Reserve num_dwords dwords in the batch; returns the write pointer. */
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
/* Append the contents of other (e.g. a secondary batch) into batch. */
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
/* Record a relocation at location against bo+offset; returns the
 * presumed 64-bit address to write there. */
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
394
/* A GPU address expressed as a BO plus byte offset; bo == NULL means a
 * literal offset with no relocation. */
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

/* Type hooks consumed by the generated genX_pack.h packing code. */
#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
402
403 static inline uint64_t
404 __gen_combine_address(struct anv_batch *batch, void *location,
405 const struct anv_address address, uint32_t delta)
406 {
407 if (address.bo == NULL) {
408 return delta;
409 } else {
410 assert(batch->bo.map <= location &&
411 (char *) location < (char *) batch->bo.map + batch->bo.size);
412
413 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
414 }
415 }
416
417 #include "gen7_pack.h"
418 #include "gen75_pack.h"
419 #undef GEN8_3DSTATE_MULTISAMPLE
420 #include "gen8_pack.h"
421
/* Pack one fixed-length command into the batch: build a template struct
 * from cmd's header plus the designated-initializer arguments, reserve
 * cmd##_length dwords, and pack it in place. */
#define anv_batch_emit(batch, cmd, ...) do { \
   struct cmd __template = { \
      cmd ## _header, \
      __VA_ARGS__ \
   }; \
   void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
   cmd ## _pack(batch, __dst, &__template); \
   } while (0)
430
/* Variable-length variant of anv_batch_emit: reserves n dwords, sets the
 * command's DwordLength field from n, and evaluates (GNU statement
 * expression) to the destination pointer so the caller can append the
 * trailing payload. */
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
   struct cmd __template = { \
      cmd ## _header, \
      .DwordLength = n - cmd ## _length_bias, \
      __VA_ARGS__ \
   }; \
   void *__dst = anv_batch_emit_dwords(batch, n); \
   cmd ## _pack(batch, __dst, &__template); \
   __dst; \
   })
441
/* Emit the bitwise OR of two equally-sized pre-packed dword arrays
 * (e.g. pipeline state merged with dynamic state).  Both arguments must
 * be actual arrays so ARRAY_SIZE is valid. */
#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
   } while (0)
451
/* Default Gen8 memory object control state: write-back LLC/eLLC
 * cacheability, target cache deferred to PAT, age 0.  Enum values come
 * from gen8_pack.h. */
#define GEN8_MOCS { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
   .AgeforQUADLRU = 0 \
   }
457
/* A VkDeviceMemory allocation: the backing BO and its CPU mapping (when
 * mapped). */
struct anv_device_memory {
   struct anv_bo bo;
   VkDeviceSize map_size;
   void * map;
};

/* Packed viewport dynamic state: SF_CLIP viewport, CC viewport, and
 * scissor, each sub-allocated from a state pool. */
struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};
470
/* Pre-packed rasterizer dynamic state, merged into the pipeline's
 * matching dwords at draw time (see anv_batch_emit_merge). */
struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

/* Pre-packed depth/stencil dynamic state. */
struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

/* Pre-packed color-blend dynamic state. */
struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];

};
485
/* Layout of one query slot inside the query pool BO: begin/end
 * timestamp-or-counter values plus an availability flag. */
struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

/* A VkQueryPool: an array of anv_query_pool_slot entries stored in bo. */
struct anv_query_pool {
   struct anv_object base;
   VkQueryType type;
   uint32_t slots;   /* number of query slots */
   struct anv_bo bo;
};
498
/* Layout of one descriptor set: per-shader-stage counts and start
 * indices for surfaces and samplers, followed by a trailing
 * variable-length entries array (allocated past the struct). */
struct anv_descriptor_set_layout {
   struct {
      uint32_t surface_count;
      uint32_t *surface_start;
      uint32_t sampler_count;
      uint32_t *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;                /* total number of entries */
   uint32_t num_dynamic_buffers;
   /* GNU zero-length trailing array (pre-C99 idiom for a flexible array
    * member). */
   uint32_t entries[0];
};

/* One bound descriptor: a sampler, a surface view, or both (combined
 * image/sampler). */
struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

/* A descriptor set is just its array of descriptors, sized by the
 * layout's count at allocation time. */
struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];
};
520
/* Driver limits: vertex buffer bindings, descriptor sets, render
 * targets. */
#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8

/* Flattened view of all descriptor set layouts in a pipeline: for each
 * set and stage, where that set's surfaces/samplers begin in the
 * stage's combined binding table. */
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   /* Per-stage totals across all sets. */
   struct {
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};
539
/* A VkBuffer: size is fixed at creation; bo/offset are filled in when
 * memory is bound. */
struct anv_buffer {
   struct anv_device * device;
   VkDeviceSize size;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;
};

/* Bits for anv_cmd_buffer::dirty — which state must be re-emitted
 * before the next draw. */
#define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
#define ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY (1 << 1)
#define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)
554
/* Currently bound vertex buffers and per-stage descriptor state for a
 * command buffer. */
struct anv_bindings {
   struct {
      struct anv_buffer *buffer;
      VkDeviceSize offset;
   } vb[MAX_VBS];

   struct {
      /* Binding-table surface state offsets and packed sampler state
       * dwords for each shader stage. */
      uint32_t surfaces[256];
      struct { uint32_t dwords[4]; } samplers[16];
   } descriptors[VK_NUM_SHADER_STAGE];
};
566
/* A VkCmdBuffer: the batch being built, the execbuf bookkeeping needed
 * to submit it, and the CPU-side state tracked while recording. */
struct anv_cmd_buffer {
   struct anv_object base;
   struct anv_device * device;

   /* Submission state: the execbuf2 request plus parallel arrays of
    * kernel exec objects and the driver BOs they describe. */
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 * exec2_objects;
   struct anv_bo ** exec2_bos;
   bool need_reloc;
   uint32_t serial;        /* submission serial (see anv_queue) */

   uint32_t bo_count;
   struct anv_batch batch;
   struct anv_bo surface_bo;        /* BO holding surface/binding-table state */
   uint32_t surface_next;           /* bump cursor into surface_bo */
   struct anv_reloc_list surface_relocs;
   struct anv_state_stream binding_table_state_stream;
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t vb_dirty;               /* bitmask of vertex buffers to re-emit */
   uint32_t dirty;                  /* ANV_CMD_BUFFER_*_DIRTY bits */
   struct anv_pipeline * pipeline;
   struct anv_framebuffer * framebuffer;
   struct anv_dynamic_rs_state * rs_state;
   struct anv_dynamic_ds_state * ds_state;
   struct anv_dynamic_vp_state * vp_state;
   struct anv_dynamic_cb_state * cb_state;
   struct anv_bindings * bindings;
   struct anv_bindings default_bindings;
};

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
void anv_aub_writer_destroy(struct anv_aub_writer *writer);
601
/* A VkFence: a tiny batch of its own whose completion is waited on via
 * the GEM wait ioctl. */
struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

/* A VkShader: just the shader source/bytecode blob, stored inline via a
 * zero-length trailing array. */
struct anv_shader {
   uint32_t size;
   char data[0];
};
614
/* A compiled graphics pipeline: the per-stage shaders and compiled
 * program data, the pre-recorded state batch, and pre-packed dwords
 * merged with dynamic state at draw time. */
struct anv_pipeline {
   struct anv_object base;
   struct anv_device * device;
   struct anv_batch batch;                 /* pre-recorded pipeline state commands */
   struct anv_shader * shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout * layout;
   bool use_repclear;                      /* meta: use REP16 clear shader */

   /* Compiled program metadata from the brw compiler backend. */
   struct brw_vs_prog_data vs_prog_data;
   struct brw_wm_prog_data wm_prog_data;
   struct brw_gs_prog_data gs_prog_data;
   struct brw_stage_prog_data * prog_data[VK_NUM_SHADER_STAGE];

   /* URB partitioning between the VS and GS stages. */
   struct {
      uint32_t vs_start;
      uint32_t vs_size;
      uint32_t nr_vs_entries;
      uint32_t gs_start;
      uint32_t gs_size;
      uint32_t nr_gs_entries;
   } urb;

   /* Per-stage scratch (spill) buffers. */
   struct anv_bo vs_scratch_bo;
   struct anv_bo ps_scratch_bo;
   struct anv_bo gs_scratch_bo;

   uint32_t active_stages;                 /* bitmask of enabled shader stages */
   struct anv_state_stream program_stream; /* holds the compiled kernels */
   struct anv_state blend_state;
   /* Kernel offsets within program_stream for each compiled variant. */
   uint32_t vs_simd8;
   uint32_t ps_simd8;
   uint32_t ps_simd16;
   uint32_t gs_vec4;
   uint32_t gs_vertex_count;

   uint32_t vb_used;                       /* bitmask of vertex bindings in use */
   uint32_t binding_stride[MAX_VBS];

   /* Pre-packed dwords OR'd with dynamic state at draw time. */
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
};
656
/* Extra, non-Vulkan pipeline options used by the internal meta
 * (clear/blit) pipelines. */
struct anv_pipeline_create_info {
   bool use_repclear;
   bool disable_viewport;
   bool disable_scissor;
   bool disable_vs;
   bool use_rectlist;
};

/* Create a graphics pipeline; extra may be NULL for normal API use. */
VkResult
anv_pipeline_create(VkDevice device,
                    const VkGraphicsPipelineCreateInfo *pCreateInfo,
                    const struct anv_pipeline_create_info *extra,
                    VkPipeline *pPipeline);

/* Compiler front-end: created once per device fd, run per pipeline. */
struct anv_compiler *anv_compiler_create(int fd);
void anv_compiler_destroy(struct anv_compiler *compiler);
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
/* Free the compiler-produced resources held by a pipeline. */
void anv_compiler_free(struct anv_pipeline *pipeline);
675
/* Hardware description of a VkFormat: surface format enum, bytes per
 * pixel, channel count, and whether a stencil aspect exists. */
struct anv_format {
   uint16_t format;     /* hardware surface format value */
   uint8_t cpp;         /* bytes per pixel */
   uint8_t channels;
   bool has_stencil;
};

/* Look up the hardware format entry for a VkFormat. */
const struct anv_format *
anv_format_for_vk_format(VkFormat format);
685
/* A VkImage: creation-time surface layout plus the BO binding filled in
 * later. */
struct anv_image {
   VkImageType type;
   VkExtent3D extent;
   VkFormat format;
   uint32_t tile_mode;
   VkDeviceSize size;          /* total memory requirement in bytes */
   uint32_t alignment;
   uint32_t stride;            /* row pitch in bytes */

   /* Separate-stencil surface location within the image allocation. */
   uint32_t stencil_offset;
   uint32_t stencil_stride;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;

   /* Non-NULL for WSI presentable images. */
   struct anv_swap_chain * swap_chain;
};

/* A view onto a surface: packed SURFACE_STATE plus the BO/offset it
 * points at.  Shared by image views and color attachment views. */
struct anv_surface_view {
   struct anv_state surface_state;
   struct anv_bo * bo;
   uint32_t offset;
   VkExtent3D extent;
   VkFormat format;
};

/* Extra, non-Vulkan image creation options (e.g. forcing a tile mode
 * for WSI/meta). */
struct anv_image_create_info {
   uint32_t tile_mode;
};

/* Create an image; extra may be NULL for normal API use. */
VkResult anv_image_create(VkDevice _device,
                          const VkImageCreateInfo *pCreateInfo,
                          const struct anv_image_create_info *extra,
                          VkImage *pImage);
716
/* Initialize a surface view from image-view or color-attachment create
 * info.  When cmd_buffer is non-NULL the surface state is presumably
 * sub-allocated from the command buffer's stream rather than the device
 * pool — confirm in the implementation. */
void anv_image_view_init(struct anv_surface_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer);

void anv_color_attachment_view_init(struct anv_surface_view *view,
                                    struct anv_device *device,
                                    const VkColorAttachmentViewCreateInfo* pCreateInfo,
                                    struct anv_cmd_buffer *cmd_buffer);

/* A VkSampler: the four packed SAMPLER_STATE dwords. */
struct anv_sampler {
   uint32_t state[4];
};
735
/* A depth/stencil attachment view: separate depth and stencil surface
 * locations within the same BO. */
struct anv_depth_stencil_view {
   struct anv_bo * bo;

   uint32_t depth_offset;
   uint32_t depth_stride;
   uint32_t depth_format;   /* hardware depth format value */

   uint32_t stencil_offset;
   uint32_t stencil_stride;
};

/* A VkFramebuffer: the attachment views plus common dimensions. */
struct anv_framebuffer {
   struct anv_object base;
   uint32_t color_attachment_count;
   const struct anv_surface_view * color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view * depth_stencil;

   uint32_t sample_count;
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   /* Viewport for clears */
   VkDynamicVpState vp_state;
};
761
/* Per-layer render pass info: the color load op and, when it is CLEAR,
 * the clear color to use. */
struct anv_render_pass_layer {
   VkAttachmentLoadOp color_load_op;
   VkClearColor clear_color;
};

/* A VkRenderPass: the render area plus per-layer clear information;
 * layers is a trailing variable-length array. */
struct anv_render_pass {
   VkRect render_area;

   uint32_t num_clear_layers;   /* number of layers with a CLEAR load op */
   uint32_t num_layers;
   struct anv_render_pass_layer layers[0];
};

/* Build the meta (clear/blit) pipelines and state on the device. */
void anv_device_init_meta(struct anv_device *device);

/* Meta clear: emit clears for the pass's CLEAR-loaded attachments. */
void
anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_render_pass *pass);

void
anv_cmd_buffer_fill_render_targets(struct anv_cmd_buffer *cmd_buffer);

/* Resolve a Vulkan entrypoint name to the driver function pointer. */
void *
anv_lookup_entrypoint(const char *name);
786
787 #ifdef __cplusplus
788 }
789 #endif