/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
34 VkResult
anv_CreateShaderModule(
36 const VkShaderModuleCreateInfo
* pCreateInfo
,
37 VkShaderModule
* pShaderModule
)
39 ANV_FROM_HANDLE(anv_device
, device
, _device
);
40 struct anv_shader_module
*module
;
42 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO
);
43 assert(pCreateInfo
->flags
== 0);
45 module
= anv_device_alloc(device
, sizeof(*module
) + pCreateInfo
->codeSize
, 8,
46 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
48 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
51 module
->size
= pCreateInfo
->codeSize
;
52 memcpy(module
->data
, pCreateInfo
->pCode
, module
->size
);
54 *pShaderModule
= anv_shader_module_to_handle(module
);
59 VkResult
anv_DestroyShaderModule(
61 VkShaderModule _module
)
63 ANV_FROM_HANDLE(anv_device
, device
, _device
);
64 ANV_FROM_HANDLE(anv_shader_module
, module
, _module
);
66 anv_device_free(device
, module
);
71 VkResult
anv_CreateShader(
73 const VkShaderCreateInfo
* pCreateInfo
,
76 ANV_FROM_HANDLE(anv_device
, device
, _device
);
77 ANV_FROM_HANDLE(anv_shader_module
, module
, pCreateInfo
->module
);
78 struct anv_shader
*shader
;
80 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
);
81 assert(pCreateInfo
->flags
== 0);
83 const char *name
= pCreateInfo
->pName
? pCreateInfo
->pName
: "main";
84 size_t name_len
= strlen(name
);
86 if (strcmp(name
, "main") != 0) {
87 anv_finishme("Multiple shaders per module not really supported");
90 shader
= anv_device_alloc(device
, sizeof(*shader
) + name_len
+ 1, 8,
91 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
93 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
95 shader
->module
= module
;
96 memcpy(shader
->entrypoint
, name
, name_len
+ 1);
98 *pShader
= anv_shader_to_handle(shader
);
103 VkResult
anv_DestroyShader(
107 ANV_FROM_HANDLE(anv_device
, device
, _device
);
108 ANV_FROM_HANDLE(anv_shader
, shader
, _shader
);
110 anv_device_free(device
, shader
);
116 VkResult
anv_CreatePipelineCache(
118 const VkPipelineCacheCreateInfo
* pCreateInfo
,
119 VkPipelineCache
* pPipelineCache
)
121 pPipelineCache
->handle
= 1;
123 stub_return(VK_SUCCESS
);
126 VkResult
anv_DestroyPipelineCache(
128 VkPipelineCache _cache
)
130 /* VkPipelineCache is a dummy object. */
134 size_t anv_GetPipelineCacheSize(
136 VkPipelineCache pipelineCache
)
141 VkResult
anv_GetPipelineCacheData(
143 VkPipelineCache pipelineCache
,
146 stub_return(VK_UNSUPPORTED
);
149 VkResult
anv_MergePipelineCaches(
151 VkPipelineCache destCache
,
152 uint32_t srcCacheCount
,
153 const VkPipelineCache
* pSrcCaches
)
155 stub_return(VK_UNSUPPORTED
);
158 VkResult
anv_DestroyPipeline(
160 VkPipeline _pipeline
)
162 ANV_FROM_HANDLE(anv_device
, device
, _device
);
163 ANV_FROM_HANDLE(anv_pipeline
, pipeline
, _pipeline
);
165 anv_compiler_free(pipeline
);
166 anv_reloc_list_finish(&pipeline
->batch_relocs
, pipeline
->device
);
167 anv_state_stream_finish(&pipeline
->program_stream
);
168 anv_state_pool_free(&device
->dynamic_state_pool
, pipeline
->blend_state
);
169 anv_device_free(pipeline
->device
, pipeline
);
174 static const uint32_t vk_to_gen_primitive_type
[] = {
175 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST
] = _3DPRIM_POINTLIST
,
176 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST
] = _3DPRIM_LINELIST
,
177 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
] = _3DPRIM_LINESTRIP
,
178 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
] = _3DPRIM_TRILIST
,
179 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
] = _3DPRIM_TRISTRIP
,
180 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
] = _3DPRIM_TRIFAN
,
181 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ
] = _3DPRIM_LINELIST_ADJ
,
182 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ
] = _3DPRIM_LINESTRIP_ADJ
,
183 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ
] = _3DPRIM_TRILIST_ADJ
,
184 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ
] = _3DPRIM_TRISTRIP_ADJ
,
185 [VK_PRIMITIVE_TOPOLOGY_PATCH
] = _3DPRIM_PATCHLIST_1
189 anv_pipeline_init(struct anv_pipeline
*pipeline
, struct anv_device
*device
,
190 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
191 const struct anv_graphics_pipeline_create_info
*extra
)
195 pipeline
->device
= device
;
196 pipeline
->layout
= anv_pipeline_layout_from_handle(pCreateInfo
->layout
);
197 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
199 result
= anv_reloc_list_init(&pipeline
->batch_relocs
, device
);
200 if (result
!= VK_SUCCESS
) {
201 anv_device_free(device
, pipeline
);
204 pipeline
->batch
.next
= pipeline
->batch
.start
= pipeline
->batch_data
;
205 pipeline
->batch
.end
= pipeline
->batch
.start
+ sizeof(pipeline
->batch_data
);
206 pipeline
->batch
.relocs
= &pipeline
->batch_relocs
;
208 anv_state_stream_init(&pipeline
->program_stream
,
209 &device
->instruction_block_pool
);
211 for (uint32_t i
= 0; i
< pCreateInfo
->stageCount
; i
++) {
212 pipeline
->shaders
[pCreateInfo
->pStages
[i
].stage
] =
213 anv_shader_from_handle(pCreateInfo
->pStages
[i
].shader
);
216 if (pCreateInfo
->pTessellationState
)
217 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
218 if (pCreateInfo
->pViewportState
)
219 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO");
220 if (pCreateInfo
->pMultisampleState
)
221 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
223 pipeline
->use_repclear
= extra
&& extra
->use_repclear
;
225 anv_compiler_run(device
->compiler
, pipeline
);
227 const struct brw_wm_prog_data
*wm_prog_data
= &pipeline
->wm_prog_data
;
229 pipeline
->ps_ksp2
= 0;
230 pipeline
->ps_grf_start2
= 0;
231 if (pipeline
->ps_simd8
!= NO_KERNEL
) {
232 pipeline
->ps_ksp0
= pipeline
->ps_simd8
;
233 pipeline
->ps_grf_start0
= wm_prog_data
->base
.dispatch_grf_start_reg
;
234 if (pipeline
->ps_simd16
!= NO_KERNEL
) {
235 pipeline
->ps_ksp2
= pipeline
->ps_simd16
;
236 pipeline
->ps_grf_start2
= wm_prog_data
->dispatch_grf_start_reg_16
;
238 } else if (pipeline
->ps_simd16
!= NO_KERNEL
) {
239 pipeline
->ps_ksp0
= pipeline
->ps_simd16
;
240 pipeline
->ps_grf_start0
= wm_prog_data
->dispatch_grf_start_reg_16
;
242 unreachable("no ps shader");
245 const VkPipelineVertexInputStateCreateInfo
*vi_info
=
246 pCreateInfo
->pVertexInputState
;
247 pipeline
->vb_used
= 0;
248 for (uint32_t i
= 0; i
< vi_info
->bindingCount
; i
++) {
249 const VkVertexInputBindingDescription
*desc
=
250 &vi_info
->pVertexBindingDescriptions
[i
];
252 pipeline
->vb_used
|= 1 << desc
->binding
;
253 pipeline
->binding_stride
[desc
->binding
] = desc
->strideInBytes
;
255 /* Step rate is programmed per vertex element (attribute), not
256 * binding. Set up a map of which bindings step per instance, for
257 * reference by vertex element setup. */
258 switch (desc
->stepRate
) {
260 case VK_VERTEX_INPUT_STEP_RATE_VERTEX
:
261 pipeline
->instancing_enable
[desc
->binding
] = false;
263 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE
:
264 pipeline
->instancing_enable
[desc
->binding
] = true;
269 const VkPipelineInputAssemblyStateCreateInfo
*ia_info
=
270 pCreateInfo
->pInputAssemblyState
;
271 pipeline
->primitive_restart
= ia_info
->primitiveRestartEnable
;
272 pipeline
->topology
= vk_to_gen_primitive_type
[ia_info
->topology
];
274 if (extra
&& extra
->use_rectlist
)
275 pipeline
->topology
= _3DPRIM_RECTLIST
;
281 anv_graphics_pipeline_create(
283 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
284 const struct anv_graphics_pipeline_create_info
*extra
,
285 VkPipeline
*pPipeline
)
287 ANV_FROM_HANDLE(anv_device
, device
, _device
);
289 switch (device
->info
.gen
) {
291 return gen7_graphics_pipeline_create(_device
, pCreateInfo
, extra
, pPipeline
);
293 return gen8_graphics_pipeline_create(_device
, pCreateInfo
, extra
, pPipeline
);
295 unreachable("unsupported gen\n");
299 VkResult
anv_CreateGraphicsPipelines(
301 VkPipelineCache pipelineCache
,
303 const VkGraphicsPipelineCreateInfo
* pCreateInfos
,
304 VkPipeline
* pPipelines
)
306 VkResult result
= VK_SUCCESS
;
309 for (; i
< count
; i
++) {
310 result
= anv_graphics_pipeline_create(_device
, &pCreateInfos
[i
],
311 NULL
, &pPipelines
[i
]);
312 if (result
!= VK_SUCCESS
) {
313 for (unsigned j
= 0; j
< i
; j
++) {
314 anv_DestroyPipeline(_device
, pPipelines
[j
]);
324 static VkResult
anv_compute_pipeline_create(
326 const VkComputePipelineCreateInfo
* pCreateInfo
,
327 VkPipeline
* pPipeline
)
329 ANV_FROM_HANDLE(anv_device
, device
, _device
);
331 switch (device
->info
.gen
) {
333 return gen7_compute_pipeline_create(_device
, pCreateInfo
, pPipeline
);
335 return gen8_compute_pipeline_create(_device
, pCreateInfo
, pPipeline
);
337 unreachable("unsupported gen\n");
341 VkResult
anv_CreateComputePipelines(
343 VkPipelineCache pipelineCache
,
345 const VkComputePipelineCreateInfo
* pCreateInfos
,
346 VkPipeline
* pPipelines
)
348 VkResult result
= VK_SUCCESS
;
351 for (; i
< count
; i
++) {
352 result
= anv_compute_pipeline_create(_device
, &pCreateInfos
[i
],
354 if (result
!= VK_SUCCESS
) {
355 for (unsigned j
= 0; j
< i
; j
++) {
356 anv_DestroyPipeline(_device
, pPipelines
[j
]);
366 // Pipeline layout functions
368 VkResult
anv_CreatePipelineLayout(
370 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
371 VkPipelineLayout
* pPipelineLayout
)
373 ANV_FROM_HANDLE(anv_device
, device
, _device
);
374 struct anv_pipeline_layout
*layout
;
376 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
378 layout
= anv_device_alloc(device
, sizeof(*layout
), 8,
379 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
381 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
383 layout
->num_sets
= pCreateInfo
->descriptorSetCount
;
385 uint32_t surface_start
[VK_SHADER_STAGE_NUM
] = { 0, };
386 uint32_t sampler_start
[VK_SHADER_STAGE_NUM
] = { 0, };
388 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
389 layout
->stage
[s
].has_dynamic_offsets
= false;
390 layout
->stage
[s
].surface_count
= 0;
391 layout
->stage
[s
].sampler_count
= 0;
394 uint32_t num_dynamic_offsets
= 0;
395 for (uint32_t i
= 0; i
< pCreateInfo
->descriptorSetCount
; i
++) {
396 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
,
397 pCreateInfo
->pSetLayouts
[i
]);
399 layout
->set
[i
].layout
= set_layout
;
400 layout
->set
[i
].dynamic_offset_start
= num_dynamic_offsets
;
401 num_dynamic_offsets
+= set_layout
->num_dynamic_buffers
;
402 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
403 if (set_layout
->num_dynamic_buffers
> 0)
404 layout
->stage
[s
].has_dynamic_offsets
= true;
406 layout
->set
[i
].stage
[s
].surface_start
= surface_start
[s
];
407 surface_start
[s
] += set_layout
->stage
[s
].surface_count
;
408 layout
->set
[i
].stage
[s
].sampler_start
= sampler_start
[s
];
409 sampler_start
[s
] += set_layout
->stage
[s
].sampler_count
;
411 layout
->stage
[s
].surface_count
+= set_layout
->stage
[s
].surface_count
;
412 layout
->stage
[s
].sampler_count
+= set_layout
->stage
[s
].sampler_count
;
416 *pPipelineLayout
= anv_pipeline_layout_to_handle(layout
);
421 VkResult
anv_DestroyPipelineLayout(
423 VkPipelineLayout _pipelineLayout
)
425 ANV_FROM_HANDLE(anv_device
, device
, _device
);
426 ANV_FROM_HANDLE(anv_pipeline_layout
, pipeline_layout
, _pipelineLayout
);
428 anv_device_free(device
, pipeline_layout
);