/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
34 VkResult
anv_CreateShaderModule(
36 const VkShaderModuleCreateInfo
* pCreateInfo
,
37 VkShaderModule
* pShaderModule
)
39 ANV_FROM_HANDLE(anv_device
, device
, _device
);
40 struct anv_shader_module
*module
;
42 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO
);
43 assert(pCreateInfo
->flags
== 0);
45 module
= anv_device_alloc(device
, sizeof(*module
) + pCreateInfo
->codeSize
, 8,
46 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
48 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
51 module
->size
= pCreateInfo
->codeSize
;
52 memcpy(module
->data
, pCreateInfo
->pCode
, module
->size
);
54 *pShaderModule
= anv_shader_module_to_handle(module
);
59 void anv_DestroyShaderModule(
61 VkShaderModule _module
)
63 ANV_FROM_HANDLE(anv_device
, device
, _device
);
64 ANV_FROM_HANDLE(anv_shader_module
, module
, _module
);
66 anv_device_free(device
, module
);
69 VkResult
anv_CreateShader(
71 const VkShaderCreateInfo
* pCreateInfo
,
74 ANV_FROM_HANDLE(anv_device
, device
, _device
);
75 ANV_FROM_HANDLE(anv_shader_module
, module
, pCreateInfo
->module
);
76 struct anv_shader
*shader
;
78 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
);
79 assert(pCreateInfo
->flags
== 0);
81 const char *name
= pCreateInfo
->pName
? pCreateInfo
->pName
: "main";
82 size_t name_len
= strlen(name
);
84 if (strcmp(name
, "main") != 0) {
85 anv_finishme("Multiple shaders per module not really supported");
88 shader
= anv_device_alloc(device
, sizeof(*shader
) + name_len
+ 1, 8,
89 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
91 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
93 shader
->module
= module
;
94 memcpy(shader
->entrypoint
, name
, name_len
+ 1);
96 *pShader
= anv_shader_to_handle(shader
);
101 void anv_DestroyShader(
105 ANV_FROM_HANDLE(anv_device
, device
, _device
);
106 ANV_FROM_HANDLE(anv_shader
, shader
, _shader
);
108 anv_device_free(device
, shader
);
112 VkResult
anv_CreatePipelineCache(
114 const VkPipelineCacheCreateInfo
* pCreateInfo
,
115 VkPipelineCache
* pPipelineCache
)
117 pPipelineCache
->handle
= 1;
119 stub_return(VK_SUCCESS
);
122 void anv_DestroyPipelineCache(
124 VkPipelineCache _cache
)
128 size_t anv_GetPipelineCacheSize(
130 VkPipelineCache pipelineCache
)
135 VkResult
anv_GetPipelineCacheData(
137 VkPipelineCache pipelineCache
,
140 stub_return(VK_UNSUPPORTED
);
143 VkResult
anv_MergePipelineCaches(
145 VkPipelineCache destCache
,
146 uint32_t srcCacheCount
,
147 const VkPipelineCache
* pSrcCaches
)
149 stub_return(VK_UNSUPPORTED
);
152 void anv_DestroyPipeline(
154 VkPipeline _pipeline
)
156 ANV_FROM_HANDLE(anv_device
, device
, _device
);
157 ANV_FROM_HANDLE(anv_pipeline
, pipeline
, _pipeline
);
159 anv_compiler_free(pipeline
);
160 anv_reloc_list_finish(&pipeline
->batch_relocs
, pipeline
->device
);
161 anv_state_stream_finish(&pipeline
->program_stream
);
162 anv_state_pool_free(&device
->dynamic_state_pool
, pipeline
->blend_state
);
163 anv_device_free(pipeline
->device
, pipeline
);
166 static const uint32_t vk_to_gen_primitive_type
[] = {
167 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST
] = _3DPRIM_POINTLIST
,
168 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST
] = _3DPRIM_LINELIST
,
169 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
] = _3DPRIM_LINESTRIP
,
170 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
] = _3DPRIM_TRILIST
,
171 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
] = _3DPRIM_TRISTRIP
,
172 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
] = _3DPRIM_TRIFAN
,
173 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ
] = _3DPRIM_LINELIST_ADJ
,
174 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ
] = _3DPRIM_LINESTRIP_ADJ
,
175 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ
] = _3DPRIM_TRILIST_ADJ
,
176 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ
] = _3DPRIM_TRISTRIP_ADJ
,
177 [VK_PRIMITIVE_TOPOLOGY_PATCH
] = _3DPRIM_PATCHLIST_1
181 anv_pipeline_init_dynamic_state(struct anv_pipeline
*pipeline
,
182 const VkGraphicsPipelineCreateInfo
*pCreateInfo
)
184 pipeline
->dynamic_state_mask
= 0;
186 if (pCreateInfo
->pDynamicState
== NULL
)
189 uint32_t count
= pCreateInfo
->pDynamicState
->dynamicStateCount
;
190 struct anv_dynamic_state
*dynamic
= &pipeline
->dynamic_state
;
192 for (uint32_t s
= 0; s
< count
; s
++) {
193 VkDynamicState state
= pCreateInfo
->pDynamicState
->pDynamicStates
[s
];
196 pipeline
->dynamic_state_mask
|= (1u << state
);
199 case VK_DYNAMIC_STATE_VIEWPORT
:
200 assert(pCreateInfo
->pViewportState
);
201 dynamic
->viewport
.count
= pCreateInfo
->pViewportState
->viewportCount
;
202 typed_memcpy(dynamic
->viewport
.viewports
,
203 pCreateInfo
->pViewportState
->pViewports
,
204 pCreateInfo
->pViewportState
->viewportCount
);
207 case VK_DYNAMIC_STATE_SCISSOR
:
208 assert(pCreateInfo
->pViewportState
);
209 dynamic
->scissor
.count
= pCreateInfo
->pViewportState
->scissorCount
;
210 typed_memcpy(dynamic
->scissor
.scissors
,
211 pCreateInfo
->pViewportState
->pScissors
,
212 pCreateInfo
->pViewportState
->scissorCount
);
215 case VK_DYNAMIC_STATE_LINE_WIDTH
:
216 assert(pCreateInfo
->pRasterState
);
217 dynamic
->line_width
= pCreateInfo
->pRasterState
->lineWidth
;
220 case VK_DYNAMIC_STATE_DEPTH_BIAS
:
221 assert(pCreateInfo
->pRasterState
);
222 dynamic
->depth_bias
.bias
= pCreateInfo
->pRasterState
->depthBias
;
223 dynamic
->depth_bias
.clamp
= pCreateInfo
->pRasterState
->depthBiasClamp
;
224 dynamic
->depth_bias
.slope_scaled
=
225 pCreateInfo
->pRasterState
->slopeScaledDepthBias
;
228 case VK_DYNAMIC_STATE_BLEND_CONSTANTS
:
229 assert(pCreateInfo
->pColorBlendState
);
230 typed_memcpy(dynamic
->blend_constants
,
231 pCreateInfo
->pColorBlendState
->blendConst
, 4);
234 case VK_DYNAMIC_STATE_DEPTH_BOUNDS
:
235 assert(pCreateInfo
->pDepthStencilState
);
236 dynamic
->depth_bounds
.min
=
237 pCreateInfo
->pDepthStencilState
->minDepthBounds
;
238 dynamic
->depth_bounds
.max
=
239 pCreateInfo
->pDepthStencilState
->maxDepthBounds
;
242 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
:
243 assert(pCreateInfo
->pDepthStencilState
);
244 dynamic
->stencil_compare_mask
.front
=
245 pCreateInfo
->pDepthStencilState
->front
.stencilCompareMask
;
246 dynamic
->stencil_compare_mask
.back
=
247 pCreateInfo
->pDepthStencilState
->back
.stencilCompareMask
;
250 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
:
251 assert(pCreateInfo
->pDepthStencilState
);
252 dynamic
->stencil_write_mask
.front
=
253 pCreateInfo
->pDepthStencilState
->front
.stencilWriteMask
;
254 dynamic
->stencil_write_mask
.back
=
255 pCreateInfo
->pDepthStencilState
->back
.stencilWriteMask
;
258 case VK_DYNAMIC_STATE_STENCIL_REFERENCE
:
259 assert(pCreateInfo
->pDepthStencilState
);
260 dynamic
->stencil_reference
.front
=
261 pCreateInfo
->pDepthStencilState
->front
.stencilReference
;
262 dynamic
->stencil_reference
.back
=
263 pCreateInfo
->pDepthStencilState
->back
.stencilReference
;
267 assert(!"Invalid dynamic state");
273 anv_pipeline_init(struct anv_pipeline
*pipeline
, struct anv_device
*device
,
274 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
275 const struct anv_graphics_pipeline_create_info
*extra
)
279 pipeline
->device
= device
;
280 pipeline
->layout
= anv_pipeline_layout_from_handle(pCreateInfo
->layout
);
281 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
283 result
= anv_reloc_list_init(&pipeline
->batch_relocs
, device
);
284 if (result
!= VK_SUCCESS
) {
285 anv_device_free(device
, pipeline
);
288 pipeline
->batch
.next
= pipeline
->batch
.start
= pipeline
->batch_data
;
289 pipeline
->batch
.end
= pipeline
->batch
.start
+ sizeof(pipeline
->batch_data
);
290 pipeline
->batch
.relocs
= &pipeline
->batch_relocs
;
292 anv_state_stream_init(&pipeline
->program_stream
,
293 &device
->instruction_block_pool
);
295 for (uint32_t i
= 0; i
< pCreateInfo
->stageCount
; i
++) {
296 pipeline
->shaders
[pCreateInfo
->pStages
[i
].stage
] =
297 anv_shader_from_handle(pCreateInfo
->pStages
[i
].shader
);
300 anv_pipeline_init_dynamic_state(pipeline
, pCreateInfo
);
302 if (pCreateInfo
->pTessellationState
)
303 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
304 if (pCreateInfo
->pViewportState
)
305 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO");
306 if (pCreateInfo
->pMultisampleState
&&
307 pCreateInfo
->pMultisampleState
->rasterSamples
> 1)
308 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
310 pipeline
->use_repclear
= extra
&& extra
->use_repclear
;
312 anv_compiler_run(device
->compiler
, pipeline
);
314 const struct brw_wm_prog_data
*wm_prog_data
= &pipeline
->wm_prog_data
;
316 pipeline
->ps_ksp2
= 0;
317 pipeline
->ps_grf_start2
= 0;
318 if (pipeline
->ps_simd8
!= NO_KERNEL
) {
319 pipeline
->ps_ksp0
= pipeline
->ps_simd8
;
320 pipeline
->ps_grf_start0
= wm_prog_data
->base
.dispatch_grf_start_reg
;
321 if (pipeline
->ps_simd16
!= NO_KERNEL
) {
322 pipeline
->ps_ksp2
= pipeline
->ps_simd16
;
323 pipeline
->ps_grf_start2
= wm_prog_data
->dispatch_grf_start_reg_16
;
325 } else if (pipeline
->ps_simd16
!= NO_KERNEL
) {
326 pipeline
->ps_ksp0
= pipeline
->ps_simd16
;
327 pipeline
->ps_grf_start0
= wm_prog_data
->dispatch_grf_start_reg_16
;
329 unreachable("no ps shader");
332 const VkPipelineVertexInputStateCreateInfo
*vi_info
=
333 pCreateInfo
->pVertexInputState
;
334 pipeline
->vb_used
= 0;
335 for (uint32_t i
= 0; i
< vi_info
->bindingCount
; i
++) {
336 const VkVertexInputBindingDescription
*desc
=
337 &vi_info
->pVertexBindingDescriptions
[i
];
339 pipeline
->vb_used
|= 1 << desc
->binding
;
340 pipeline
->binding_stride
[desc
->binding
] = desc
->strideInBytes
;
342 /* Step rate is programmed per vertex element (attribute), not
343 * binding. Set up a map of which bindings step per instance, for
344 * reference by vertex element setup. */
345 switch (desc
->stepRate
) {
347 case VK_VERTEX_INPUT_STEP_RATE_VERTEX
:
348 pipeline
->instancing_enable
[desc
->binding
] = false;
350 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE
:
351 pipeline
->instancing_enable
[desc
->binding
] = true;
356 const VkPipelineInputAssemblyStateCreateInfo
*ia_info
=
357 pCreateInfo
->pInputAssemblyState
;
358 pipeline
->primitive_restart
= ia_info
->primitiveRestartEnable
;
359 pipeline
->topology
= vk_to_gen_primitive_type
[ia_info
->topology
];
361 if (extra
&& extra
->use_rectlist
)
362 pipeline
->topology
= _3DPRIM_RECTLIST
;
368 anv_graphics_pipeline_create(
370 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
371 const struct anv_graphics_pipeline_create_info
*extra
,
372 VkPipeline
*pPipeline
)
374 ANV_FROM_HANDLE(anv_device
, device
, _device
);
376 switch (device
->info
.gen
) {
378 return gen7_graphics_pipeline_create(_device
, pCreateInfo
, extra
, pPipeline
);
380 return gen8_graphics_pipeline_create(_device
, pCreateInfo
, extra
, pPipeline
);
382 unreachable("unsupported gen\n");
386 VkResult
anv_CreateGraphicsPipelines(
388 VkPipelineCache pipelineCache
,
390 const VkGraphicsPipelineCreateInfo
* pCreateInfos
,
391 VkPipeline
* pPipelines
)
393 VkResult result
= VK_SUCCESS
;
396 for (; i
< count
; i
++) {
397 result
= anv_graphics_pipeline_create(_device
, &pCreateInfos
[i
],
398 NULL
, &pPipelines
[i
]);
399 if (result
!= VK_SUCCESS
) {
400 for (unsigned j
= 0; j
< i
; j
++) {
401 anv_DestroyPipeline(_device
, pPipelines
[j
]);
411 static VkResult
anv_compute_pipeline_create(
413 const VkComputePipelineCreateInfo
* pCreateInfo
,
414 VkPipeline
* pPipeline
)
416 ANV_FROM_HANDLE(anv_device
, device
, _device
);
418 switch (device
->info
.gen
) {
420 return gen7_compute_pipeline_create(_device
, pCreateInfo
, pPipeline
);
422 return gen8_compute_pipeline_create(_device
, pCreateInfo
, pPipeline
);
424 unreachable("unsupported gen\n");
428 VkResult
anv_CreateComputePipelines(
430 VkPipelineCache pipelineCache
,
432 const VkComputePipelineCreateInfo
* pCreateInfos
,
433 VkPipeline
* pPipelines
)
435 VkResult result
= VK_SUCCESS
;
438 for (; i
< count
; i
++) {
439 result
= anv_compute_pipeline_create(_device
, &pCreateInfos
[i
],
441 if (result
!= VK_SUCCESS
) {
442 for (unsigned j
= 0; j
< i
; j
++) {
443 anv_DestroyPipeline(_device
, pPipelines
[j
]);
453 // Pipeline layout functions
455 VkResult
anv_CreatePipelineLayout(
457 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
458 VkPipelineLayout
* pPipelineLayout
)
460 ANV_FROM_HANDLE(anv_device
, device
, _device
);
461 struct anv_pipeline_layout l
, *layout
;
463 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
465 l
.num_sets
= pCreateInfo
->descriptorSetCount
;
467 unsigned dynamic_offset_count
= 0;
469 memset(l
.stage
, 0, sizeof(l
.stage
));
470 for (uint32_t set
= 0; set
< pCreateInfo
->descriptorSetCount
; set
++) {
471 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
,
472 pCreateInfo
->pSetLayouts
[set
]);
473 l
.set
[set
].layout
= set_layout
;
475 l
.set
[set
].dynamic_offset_start
= dynamic_offset_count
;
476 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
477 if (set_layout
->binding
[b
].dynamic_offset_index
>= 0)
478 dynamic_offset_count
+= set_layout
->binding
[b
].array_size
;
481 for (VkShaderStage s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
482 l
.set
[set
].stage
[s
].surface_start
= l
.stage
[s
].surface_count
;
483 l
.set
[set
].stage
[s
].sampler_start
= l
.stage
[s
].sampler_count
;
485 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
486 unsigned array_size
= set_layout
->binding
[b
].array_size
;
488 if (set_layout
->binding
[b
].stage
[s
].surface_index
>= 0) {
489 l
.stage
[s
].surface_count
+= array_size
;
491 if (set_layout
->binding
[b
].dynamic_offset_index
>= 0)
492 l
.stage
[s
].has_dynamic_offsets
= true;
495 if (set_layout
->binding
[b
].stage
[s
].sampler_index
>= 0)
496 l
.stage
[s
].sampler_count
+= array_size
;
501 unsigned num_bindings
= 0;
502 for (VkShaderStage s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++)
503 num_bindings
+= l
.stage
[s
].surface_count
+ l
.stage
[s
].sampler_count
;
505 size_t size
= sizeof(*layout
) + num_bindings
* sizeof(layout
->entries
[0]);
507 layout
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
509 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
511 /* Now we can actually build our surface and sampler maps */
512 struct anv_pipeline_binding
*entry
= layout
->entries
;
513 for (VkShaderStage s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
514 l
.stage
[s
].surface_to_descriptor
= entry
;
515 entry
+= l
.stage
[s
].surface_count
;
516 l
.stage
[s
].sampler_to_descriptor
= entry
;
517 entry
+= l
.stage
[s
].sampler_count
;
521 for (uint32_t set
= 0; set
< pCreateInfo
->descriptorSetCount
; set
++) {
522 struct anv_descriptor_set_layout
*set_layout
= l
.set
[set
].layout
;
524 unsigned set_offset
= 0;
525 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
526 unsigned array_size
= set_layout
->binding
[b
].array_size
;
528 if (set_layout
->binding
[b
].stage
[s
].surface_index
>= 0) {
529 assert(surface
== l
.set
[set
].stage
[s
].surface_start
+
530 set_layout
->binding
[b
].stage
[s
].surface_index
);
531 for (unsigned i
= 0; i
< array_size
; i
++) {
532 l
.stage
[s
].surface_to_descriptor
[surface
+ i
].set
= set
;
533 l
.stage
[s
].surface_to_descriptor
[surface
+ i
].offset
= set_offset
+ i
;
535 surface
+= array_size
;
538 if (set_layout
->binding
[b
].stage
[s
].sampler_index
>= 0) {
539 assert(sampler
== l
.set
[set
].stage
[s
].sampler_start
+
540 set_layout
->binding
[b
].stage
[s
].sampler_index
);
541 for (unsigned i
= 0; i
< array_size
; i
++) {
542 l
.stage
[s
].sampler_to_descriptor
[sampler
+ i
].set
= set
;
543 l
.stage
[s
].sampler_to_descriptor
[sampler
+ i
].offset
= set_offset
+ i
;
545 sampler
+= array_size
;
548 set_offset
+= array_size
;
553 /* Finally, we're done setting it up, copy into the allocated version */
556 *pPipelineLayout
= anv_pipeline_layout_to_handle(layout
);
561 void anv_DestroyPipelineLayout(
563 VkPipelineLayout _pipelineLayout
)
565 ANV_FROM_HANDLE(anv_device
, device
, _device
);
566 ANV_FROM_HANDLE(anv_pipeline_layout
, pipeline_layout
, _pipelineLayout
);
568 anv_device_free(device
, pipeline_layout
);