/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "anv_private.h"
26 #include "genxml/gen_macros.h"
27 #include "genxml/genX_pack.h"
30 compute_pipeline_create(
32 struct anv_pipeline_cache
* cache
,
33 const VkComputePipelineCreateInfo
* pCreateInfo
,
34 const VkAllocationCallbacks
* pAllocator
,
35 VkPipeline
* pPipeline
)
37 ANV_FROM_HANDLE(anv_device
, device
, _device
);
38 const struct anv_physical_device
*physical_device
=
39 &device
->instance
->physicalDevice
;
40 const struct gen_device_info
*devinfo
= &physical_device
->info
;
41 struct anv_pipeline
*pipeline
;
44 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
);
46 pipeline
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pipeline
), 8,
47 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
49 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
51 pipeline
->device
= device
;
52 pipeline
->layout
= anv_pipeline_layout_from_handle(pCreateInfo
->layout
);
54 pipeline
->blend_state
.map
= NULL
;
56 result
= anv_reloc_list_init(&pipeline
->batch_relocs
,
57 pAllocator
? pAllocator
: &device
->alloc
);
58 if (result
!= VK_SUCCESS
) {
59 vk_free2(&device
->alloc
, pAllocator
, pipeline
);
62 pipeline
->batch
.next
= pipeline
->batch
.start
= pipeline
->batch_data
;
63 pipeline
->batch
.end
= pipeline
->batch
.start
+ sizeof(pipeline
->batch_data
);
64 pipeline
->batch
.relocs
= &pipeline
->batch_relocs
;
66 /* When we free the pipeline, we detect stages based on the NULL status
67 * of various prog_data pointers. Make them NULL by default.
69 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
71 pipeline
->vs_simd8
= NO_KERNEL
;
72 pipeline
->vs_vec4
= NO_KERNEL
;
73 pipeline
->gs_kernel
= NO_KERNEL
;
75 pipeline
->active_stages
= 0;
77 pipeline
->needs_data_cache
= false;
79 assert(pCreateInfo
->stage
.stage
== VK_SHADER_STAGE_COMPUTE_BIT
);
80 ANV_FROM_HANDLE(anv_shader_module
, module
, pCreateInfo
->stage
.module
);
81 result
= anv_pipeline_compile_cs(pipeline
, cache
, pCreateInfo
, module
,
82 pCreateInfo
->stage
.pName
,
83 pCreateInfo
->stage
.pSpecializationInfo
);
84 if (result
!= VK_SUCCESS
) {
85 vk_free2(&device
->alloc
, pAllocator
, pipeline
);
89 const struct brw_cs_prog_data
*cs_prog_data
= get_cs_prog_data(pipeline
);
91 anv_pipeline_setup_l3_config(pipeline
, cs_prog_data
->base
.total_shared
> 0);
93 uint32_t group_size
= cs_prog_data
->local_size
[0] *
94 cs_prog_data
->local_size
[1] * cs_prog_data
->local_size
[2];
95 uint32_t remainder
= group_size
& (cs_prog_data
->simd_size
- 1);
98 pipeline
->cs_right_mask
= ~0u >> (32 - remainder
);
100 pipeline
->cs_right_mask
= ~0u >> (32 - cs_prog_data
->simd_size
);
102 const uint32_t vfe_curbe_allocation
=
103 ALIGN(cs_prog_data
->push
.per_thread
.regs
* cs_prog_data
->threads
+
104 cs_prog_data
->push
.cross_thread
.regs
, 2);
106 const uint32_t subslices
= MAX2(physical_device
->subslice_total
, 1);
108 anv_batch_emit(&pipeline
->batch
, GENX(MEDIA_VFE_STATE
), vfe
) {
109 vfe
.ScratchSpaceBasePointer
= (struct anv_address
) {
110 .bo
= anv_scratch_pool_alloc(device
, &device
->scratch_pool
,
112 cs_prog_data
->base
.total_scratch
),
115 vfe
.PerThreadScratchSpace
= ffs(cs_prog_data
->base
.total_scratch
/ 2048);
119 vfe
.GPGPUMode
= true;
121 vfe
.MaximumNumberofThreads
=
122 devinfo
->max_cs_threads
* subslices
- 1;
123 vfe
.NumberofURBEntries
= GEN_GEN
<= 7 ? 0 : 2;
124 vfe
.ResetGatewayTimer
= true;
126 vfe
.BypassGatewayControl
= true;
128 vfe
.URBEntryAllocationSize
= GEN_GEN
<= 7 ? 0 : 2;
129 vfe
.CURBEAllocationSize
= vfe_curbe_allocation
;
132 *pPipeline
= anv_pipeline_to_handle(pipeline
);
137 VkResult
genX(CreateGraphicsPipelines
)(
139 VkPipelineCache pipelineCache
,
141 const VkGraphicsPipelineCreateInfo
* pCreateInfos
,
142 const VkAllocationCallbacks
* pAllocator
,
143 VkPipeline
* pPipelines
)
145 ANV_FROM_HANDLE(anv_pipeline_cache
, pipeline_cache
, pipelineCache
);
147 VkResult result
= VK_SUCCESS
;
150 for (; i
< count
; i
++) {
151 result
= genX(graphics_pipeline_create
)(_device
,
154 pAllocator
, &pPipelines
[i
]);
155 if (result
!= VK_SUCCESS
) {
156 for (unsigned j
= 0; j
< i
; j
++) {
157 anv_DestroyPipeline(_device
, pPipelines
[j
], pAllocator
);
167 VkResult
genX(CreateComputePipelines
)(
169 VkPipelineCache pipelineCache
,
171 const VkComputePipelineCreateInfo
* pCreateInfos
,
172 const VkAllocationCallbacks
* pAllocator
,
173 VkPipeline
* pPipelines
)
175 ANV_FROM_HANDLE(anv_pipeline_cache
, pipeline_cache
, pipelineCache
);
177 VkResult result
= VK_SUCCESS
;
180 for (; i
< count
; i
++) {
181 result
= compute_pipeline_create(_device
, pipeline_cache
,
183 pAllocator
, &pPipelines
[i
]);
184 if (result
!= VK_SUCCESS
) {
185 for (unsigned j
= 0; j
< i
; j
++) {
186 anv_DestroyPipeline(_device
, pPipelines
[j
], pAllocator
);