vk/headers: Make General State offsets relocations
src/vulkan/private.h
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h> /* for the va_list used by anv_abortfv() */
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif

#include "brw_device_info.h"
#include "util/macros.h"

#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_wsi_lunarg.h>

#include "entrypoints.h"

#include "brw_context.h"

#ifdef __cplusplus
extern "C" {
#endif

#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

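/* Round v up to the next multiple of a, which must be a power of two (the
 * mask trick below does not hold for other values); e.g. ALIGN_U32(13, 8)
 * returns 16.
 */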
static inline uint32_t
ALIGN_U32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
ALIGN_I32(int32_t v, int32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

#define for_each_bit(b, dword) \
   for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))
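
/* Example: visit each set bit of a mask, lowest bit first.
 *
 *    uint32_t b, mask = 0x905;
 *    for_each_bit(b, mask)
 *       printf("bit %d\n", b);    // prints 0, 2, 8, 11
 */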

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

struct anv_common {
   VkStructureType                              sType;
   const void*                                  pNext;
};

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

static inline VkResult
vk_error(VkResult error)
{
#ifdef DEBUG
   fprintf(stderr, "vk_error: %x\n", error);
#endif

   return error;
}

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)

/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way, the
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_vector {
   uint32_t head;
   uint32_t tail;
   uint32_t element_size;
   uint32_t size;
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

static inline int
anv_vector_length(struct anv_vector *queue)
{
   return (queue->head - queue->tail) / queue->element_size;
}

static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}

#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
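
/* A minimal usage sketch; the element type is arbitrary, and size (the
 * buffer size in bytes) should be a power of two, since the foreach above
 * masks offsets with size - 1:
 *
 *    struct anv_vector queue;
 *    uint64_t *entry;
 *
 *    anv_vector_init(&queue, sizeof(uint64_t), 4096);
 *    entry = anv_vector_add(&queue);      // reserve a slot at head
 *    *entry = 42;
 *    entry = anv_vector_remove(&queue);   // oldest slot, from tail
 *    anv_vector_finish(&queue);
 */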

struct anv_bo {
   int gem_handle;
   uint32_t index;
   uint64_t offset;
   uint64_t size;

   /* This field is here for the benefit of the aub dumper. It can (and for
    * userptr bos it must) be set to the cpu map of the buffer. Destroying
    * the bo won't clean up the mmap; it's still the responsibility of the bo
    * user to do that. */
   void *map;
};

/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
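
/* Pairing offset with a change count widens the head to 64 bits, so a
 * compare-and-swap can tell "same offset, different list" apart from "no
 * change" (the ABA problem). A sketch of a lock-free pop, assuming a
 * hypothetical next_ptr(pool, offset) helper that returns a pointer to the
 * next-offset stored in the freed slot:
 *
 *    union anv_free_list current = list->head, old, new;
 *    for (;;) {
 *       if (current.offset == 1)   // matches ANV_FREE_LIST_EMPTY
 *          return false;
 *       new.offset = *next_ptr(pool, current.offset);
 *       new.count = current.count + 1;
 *       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
 *       if (old.u64 == current.u64)
 *          return true;            // current.offset now belongs to us
 *       current = old;
 *    }
 */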

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;
   void *map;
   int fd;
   uint32_t size;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;

   uint32_t next_block;
   union anv_free_list free_list;
};

struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};
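
/* Packing next and end into a single uint64_t lets an allocator bump next
 * and test against end in one atomic operation: adding the allocation size
 * to u64 only touches the low dword (next) as long as it does not overflow.
 * A sketch of the fast path, where pool_state is a hypothetical pointer to
 * the allocator's struct anv_block_state:
 *
 *    struct anv_block_state old;
 *    old.u64 = __sync_fetch_and_add(&pool_state->u64, size);
 *    if (old.next + size <= old.end)
 *       return old.next;        // allocation fits in the current block
 *    // otherwise: grab a fresh block and retry
 */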

struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;
   uint32_t current_block;
   uint32_t end;
};

void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
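
/* How these allocators nest: the block pool hands out fixed-size blocks
 * from a single BO; a state pool carves blocks into power-of-two buckets
 * bounded by ANV_MIN_STATE_SIZE_LOG2 and ANV_MAX_STATE_SIZE_LOG2, each with
 * its own free list; a state stream suballocates linearly and is released
 * all at once. An illustrative allocation (ss_dwords is a hypothetical
 * array of surface-state dwords):
 *
 *    struct anv_state state =
 *       anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
 *    memcpy(state.map, ss_dwords, 64);    // CPU-visible map of the state
 *    // state.offset locates the state within the pool's BO
 *    anv_state_pool_free(&device->surface_state_pool, state);
 */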

/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   uint32_t bo_size;

   void *free_list;
};

void anv_bo_pool_init(struct anv_bo_pool *pool,
                      struct anv_device *device, uint32_t block_size);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

struct anv_object;
struct anv_device;

typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

struct anv_object {
   anv_object_destructor_cb destructor;
};

struct anv_physical_device {
   struct anv_instance *                        instance;
   uint32_t                                     chipset_id;
   bool                                         no_hw;
   const char *                                 path;
   const char *                                 name;
   const struct brw_device_info *               info;
};

struct anv_instance {
   void *                                       pAllocUserData;
   PFN_vkAllocFunction                          pfnAlloc;
   PFN_vkFreeFunction                           pfnFree;
   uint32_t                                     apiVersion;
   uint32_t                                     physicalDeviceCount;
   struct anv_physical_device                   physicalDevice;
};

struct anv_meta_state {
   struct {
      VkPipeline pipeline;
   } clear;

   struct {
      VkPipeline pipeline;
      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } blit;

   struct {
      VkDynamicRsState rs_state;
      VkDynamicCbState cb_state;
      VkDynamicDsState ds_state;
   } shared;
};

struct anv_queue {
   struct anv_device *                          device;

   struct anv_state_pool *                      pool;

   /**
    * Serial number of the most recently completed batch executed on the
    * engine.
    */
   struct anv_state                             completed_serial;

   /**
    * The next batch submitted to the engine will be assigned this serial
    * number.
    */
   uint32_t                                     next_serial;

   uint32_t                                     last_collected_serial;
};

struct anv_device {
   struct anv_instance *                        instance;
   uint32_t                                     chipset_id;
   struct brw_device_info                       info;
   int                                          context_id;
   int                                          fd;
   bool                                         no_hw;
   bool                                         dump_aub;

   struct anv_bo_pool                           batch_bo_pool;

   struct anv_block_pool                        dynamic_state_block_pool;
   struct anv_state_pool                        dynamic_state_pool;

   struct anv_block_pool                        instruction_block_pool;
   struct anv_block_pool                        surface_state_block_pool;
   struct anv_state_pool                        surface_state_pool;

   struct anv_meta_state                        meta_state;

   struct anv_state                             float_border_colors;
   struct anv_state                             uint32_border_colors;

   struct anv_queue                             queue;

   struct anv_compiler *                        compiler;
   struct anv_aub_writer *                      aub_writer;
   pthread_mutex_t                              mutex;
};

void *
anv_device_alloc(struct anv_device *            device,
                 size_t                         size,
                 size_t                         alignment,
                 VkSystemAllocType              allocType);

void
anv_device_free(struct anv_device *             device,
                void *                          mem);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, int gem_handle);
int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
int anv_gem_fd_to_handle(struct anv_device *device, int fd);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);

struct anv_reloc_list {
   size_t                                       num_relocs;
   size_t                                       array_length;
   struct drm_i915_gem_relocation_entry *       relocs;
   struct anv_bo **                             reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             struct anv_device *device);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           struct anv_device *device);

struct anv_batch_bo {
   struct anv_bo                                bo;

   /* Bytes actually consumed in this batch BO */
   size_t                                       length;

   /* These offsets reference the per-batch reloc list */
   size_t                                       first_reloc;
   size_t                                       num_relocs;

   struct anv_batch_bo *                        prev_batch_bo;
};

struct anv_batch {
   struct anv_device *                          device;

   void *                                       start;
   void *                                       end;
   void *                                       next;

   struct anv_reloc_list                        relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *                                       user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);

struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch

static inline uint64_t
__gen_combine_address(struct anv_batch *batch, void *location,
                      const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}
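
/* The generated gen*_pack.h helpers call __gen_combine_address() for each
 * address field they pack. A NULL bo means the value is a plain offset and
 * is emitted as-is; otherwise a relocation is recorded through
 * anv_batch_emit_reloc() and the returned value is only the presumed
 * address, which the kernel may rewrite at execbuf time.
 */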

#include "gen7_pack.h"
#include "gen75_pack.h"
#undef GEN8_3DSTATE_MULTISAMPLE
#include "gen8_pack.h"

#define anv_batch_emit(batch, cmd, ...) do { \
      struct cmd __template = { \
         cmd ## _header, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
      cmd ## _pack(batch, __dst, &__template); \
   } while (0)
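
/* Typical use of anv_batch_emit(): designated initializers fill the
 * command template and the generated _pack() function writes (and, via
 * __gen_combine_address(), relocates) the dwords. Field values here are
 * illustrative:
 *
 *    anv_batch_emit(batch, GEN8_PIPE_CONTROL,
 *                   .CommandStreamerStallEnable = true);
 */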

#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      struct cmd __template = { \
         cmd ## _header, \
         .DwordLength = n - cmd ## _length_bias, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      cmd ## _pack(batch, __dst, &__template); \
      __dst; \
   })
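
/* anv_batch_emitn() is for variable-length commands: n is the total dword
 * count and the returned pointer lets the caller pack the trailing dwords.
 * A sketch for vertex buffers (assuming one 4-dword VERTEX_BUFFER_STATE per
 * binding after the one-dword header):
 *
 *    uint32_t *p = anv_batch_emitn(batch, 1 + num_vbs * 4,
 *                                  GEN8_3DSTATE_VERTEX_BUFFERS);
 *    // pack a GEN8_VERTEX_BUFFER_STATE into &p[1 + i * 4] for each i
 */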

#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)
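
/* anv_batch_emit_merge() ORs two pre-packed dword arrays into the batch.
 * This lets state that is split between pipeline-baked and dynamic halves
 * be combined at emit time; a sketch using the state_sf arrays declared
 * below in anv_dynamic_rs_state and anv_pipeline:
 *
 *    anv_batch_emit_merge(&cmd_buffer->batch,
 *                         cmd_buffer->rs_state->state_sf,
 *                         pipeline->state_sf);
 */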

#define GEN8_MOCS { \
      .MemoryTypeLLCeLLCCacheabilityControl = WB, \
      .TargetCache = L3DefertoPATforLLCeLLCselection, \
      .AgeforQUADLRU = 0 \
   }

struct anv_device_memory {
   struct anv_bo                                bo;
   VkDeviceSize                                 map_size;
   void *                                       map;
};

struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};

struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

struct anv_descriptor_slot {
   int8_t dynamic_slot;
   uint8_t index;
};

struct anv_descriptor_set_layout {
   struct {
      uint32_t surface_count;
      struct anv_descriptor_slot *surface_start;
      uint32_t sampler_count;
      struct anv_descriptor_slot *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;
   uint32_t num_dynamic_buffers;
   uint32_t shader_stages;
   struct anv_descriptor_slot entries[0];
};

struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];
};

#define MAX_VBS  32
#define MAX_SETS  8
#define MAX_RTS   8

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};

struct anv_buffer {
   struct anv_device *                          device;
   VkDeviceSize                                 size;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;
};

#define ANV_CMD_BUFFER_PIPELINE_DIRTY  (1 << 0)
#define ANV_CMD_BUFFER_RS_DIRTY        (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY        (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY        (1 << 4)
#define ANV_CMD_BUFFER_VP_DIRTY        (1 << 5)

struct anv_vertex_binding {
   struct anv_buffer *                          buffer;
   VkDeviceSize                                 offset;
};

struct anv_descriptor_set_binding {
   struct anv_descriptor_set *                  set;
   uint32_t                                     dynamic_offsets[128];
};

struct anv_cmd_buffer {
   struct anv_object                            base;
   struct anv_device *                          device;

   struct drm_i915_gem_execbuffer2              execbuf;
   struct drm_i915_gem_exec_object2 *           exec2_objects;
   struct anv_bo **                             exec2_bos;
   uint32_t                                     exec2_array_length;
   bool                                         need_reloc;
   uint32_t                                     serial;

   uint32_t                                     bo_count;
   struct anv_batch                             batch;
   struct anv_batch_bo *                        last_batch_bo;
   struct anv_batch_bo *                        surface_batch_bo;
   uint32_t                                     surface_next;
   struct anv_reloc_list                        surface_relocs;
   struct anv_state_stream                      surface_state_stream;
   struct anv_state_stream                      dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t                                     current_pipeline;
   uint32_t                                     vb_dirty;
   uint32_t                                     dirty;
   uint32_t                                     compute_dirty;
   uint32_t                                     descriptors_dirty;
   struct anv_pipeline *                        pipeline;
   struct anv_pipeline *                        compute_pipeline;
   struct anv_framebuffer *                     framebuffer;
   struct anv_dynamic_rs_state *                rs_state;
   struct anv_dynamic_ds_state *                ds_state;
   struct anv_dynamic_vp_state *                vp_state;
   struct anv_dynamic_cb_state *                cb_state;
   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
   struct anv_descriptor_set_binding            descriptors[MAX_SETS];
};

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
void anv_aub_writer_destroy(struct anv_aub_writer *writer);

struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

struct anv_shader {
   uint32_t size;
   char data[0];
};

struct anv_pipeline {
   struct anv_object                            base;
   struct anv_device *                          device;
   struct anv_batch                             batch;
   uint32_t                                     batch_data[256];
   struct anv_shader *                          shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout *                 layout;
   bool                                         use_repclear;

   struct brw_vs_prog_data                      vs_prog_data;
   struct brw_wm_prog_data                      wm_prog_data;
   struct brw_gs_prog_data                      gs_prog_data;
   struct brw_cs_prog_data                      cs_prog_data;
   struct brw_stage_prog_data *                 prog_data[VK_NUM_SHADER_STAGE];
   struct {
      uint32_t                                  vs_start;
      uint32_t                                  vs_size;
      uint32_t                                  nr_vs_entries;
      uint32_t                                  gs_start;
      uint32_t                                  gs_size;
      uint32_t                                  nr_gs_entries;
   } urb;

   struct anv_bo                                vs_scratch_bo;
   struct anv_bo                                ps_scratch_bo;
   struct anv_bo                                gs_scratch_bo;
   struct anv_bo                                cs_scratch_bo;

   uint32_t                                     active_stages;
   struct anv_state_stream                      program_stream;
   struct anv_state                             blend_state;
   uint32_t                                     vs_simd8;
   uint32_t                                     ps_simd8;
   uint32_t                                     ps_simd16;
   uint32_t                                     gs_vec4;
   uint32_t                                     gs_vertex_count;
   uint32_t                                     cs_simd;

   uint32_t                                     vb_used;
   uint32_t                                     binding_stride[MAX_VBS];

   uint32_t                                     state_sf[GEN8_3DSTATE_SF_length];
   uint32_t                                     state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t                                     state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];

   uint32_t                                     cs_thread_width_max;
   uint32_t                                     cs_right_mask;
};

struct anv_pipeline_create_info {
   bool use_repclear;
   bool disable_viewport;
   bool disable_scissor;
   bool disable_vs;
   bool use_rectlist;
};

VkResult
anv_pipeline_create(VkDevice device,
                    const VkGraphicsPipelineCreateInfo *pCreateInfo,
                    const struct anv_pipeline_create_info *extra,
                    VkPipeline *pPipeline);

struct anv_compiler *anv_compiler_create(struct anv_device *device);
void anv_compiler_destroy(struct anv_compiler *compiler);
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
void anv_compiler_free(struct anv_pipeline *pipeline);

struct anv_format {
   const char *                                 name;
   uint16_t                                     format;
   uint8_t                                      cpp;
   uint8_t                                      channels;
   bool                                         has_stencil;
};

const struct anv_format *
anv_format_for_vk_format(VkFormat format);

struct anv_image {
   VkImageType                                  type;
   VkExtent3D                                   extent;
   VkFormat                                     format;
   uint32_t                                     tile_mode;
   VkDeviceSize                                 size;
   uint32_t                                     alignment;
   uint32_t                                     stride;

   uint32_t                                     stencil_offset;
   uint32_t                                     stencil_stride;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;

   struct anv_swap_chain *                      swap_chain;

   /**
    * \name Alignment of miptree images, in units of pixels.
    *
    * These fields contain the actual alignment values, not the values the
    * hardware expects. For example, if h_align is 4, then program the hardware
    * with HALIGN_4.
    *
    * \see RENDER_SURFACE_STATE.SurfaceHorizontalAlignment
    * \see RENDER_SURFACE_STATE.SurfaceVerticalAlignment
    * \{
    */
   uint8_t                                      h_align;
   uint8_t                                      v_align;
   /** \} */
};

struct anv_surface_view {
   struct anv_object                            base;

   struct anv_state                             surface_state;
   struct anv_bo *                              bo;
   uint32_t                                     offset;
   uint32_t                                     range;
   VkExtent3D                                   extent;
   VkFormat                                     format;
};

struct anv_image_create_info {
   uint32_t tile_mode;
};

VkResult anv_image_create(VkDevice _device,
                          const VkImageCreateInfo *pCreateInfo,
                          const struct anv_image_create_info *extra,
                          VkImage *pImage);

void anv_image_view_init(struct anv_surface_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer);

void anv_color_attachment_view_init(struct anv_surface_view *view,
                                    struct anv_device *device,
                                    const VkColorAttachmentViewCreateInfo* pCreateInfo,
                                    struct anv_cmd_buffer *cmd_buffer);

void anv_surface_view_destroy(struct anv_device *device,
                              struct anv_object *obj, VkObjectType obj_type);

struct anv_sampler {
   uint32_t state[4];
};

struct anv_depth_stencil_view {
   struct anv_bo *                              bo;

   uint32_t                                     depth_offset;
   uint32_t                                     depth_stride;
   uint32_t                                     depth_format;

   uint32_t                                     stencil_offset;
   uint32_t                                     stencil_stride;
};

struct anv_framebuffer {
   struct anv_object                            base;
   uint32_t                                     color_attachment_count;
   const struct anv_surface_view *              color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view *        depth_stencil;

   uint32_t                                     sample_count;
   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   /* Viewport for clears */
   VkDynamicVpState                             vp_state;
};

struct anv_render_pass_layer {
   VkAttachmentLoadOp color_load_op;
   VkClearColor clear_color;
};

struct anv_render_pass {
   VkRect render_area;

   uint32_t num_clear_layers;
   uint32_t num_layers;
   struct anv_render_pass_layer layers[0];
};

void anv_device_init_meta(struct anv_device *device);
void anv_device_finish_meta(struct anv_device *device);

void
anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_render_pass *pass);

void *
anv_lookup_entrypoint(const char *name);

#ifdef __cplusplus
}
#endif