mesa.git: src/freedreno/vulkan/tu_pipeline.c @ 50b3a02e6226b386d4a415252fd87ef255de5fef
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "main/menums.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"

#include "tu_cs.h"

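/* collects the inputs needed to build a graphics pipeline: the device, the
 * pipeline cache, the allocator and the create info
 */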
struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;
};

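/* map a VkDynamicState to the corresponding TU_DYNAMIC_* bit */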
static enum tu_dynamic_state_bits
tu_dynamic_state_bit(VkDynamicState state)
{
   switch (state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return TU_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return TU_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return TU_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return TU_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return TU_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return TU_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return TU_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return TU_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return TU_DYNAMIC_STENCIL_REFERENCE;
   default:
      unreachable("invalid dynamic state");
      return 0;
   }
}

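/* allocate the pipeline object and set up its command stream */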
static VkResult
tu_pipeline_builder_create_pipeline(struct tu_pipeline_builder *builder,
                                    struct tu_pipeline **out_pipeline)
{
   struct tu_device *dev = builder->device;

   struct tu_pipeline *pipeline =
      vk_zalloc2(&dev->alloc, builder->alloc, sizeof(*pipeline), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   tu_cs_init(&pipeline->cs, TU_CS_MODE_SUB_STREAM, 2048);

   /* reserve the space now such that tu_cs_begin_sub_stream never fails */
   VkResult result = tu_cs_reserve_space(dev, &pipeline->cs, 2048);
   if (result != VK_SUCCESS) {
      vk_free2(&dev->alloc, builder->alloc, pipeline);
      return result;
   }

   *out_pipeline = pipeline;

   return VK_SUCCESS;
}

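/* record the states listed in VkPipelineDynamicStateCreateInfo in the
 * pipeline's dynamic state mask
 */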
static void
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                  struct tu_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      pipeline->dynamic_state.mask |=
         tu_dynamic_state_bit(dynamic_info->pDynamicStates[i]);
   }
}

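/* free everything owned by the pipeline, currently just its command stream */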
static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(dev, &pipeline->cs);
}

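/* build a pipeline from the create info held by the builder */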
static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result = tu_pipeline_builder_create_pipeline(builder, pipeline);
   if (result != VK_SUCCESS)
      return result;

   tu_pipeline_builder_parse_dynamic(builder, *pipeline);

   /* we should have reserved enough space upfront such that the CS never
    * grows
    */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}

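/* set up a builder for a graphics pipeline create call */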
static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .create_info = create_info,
      .alloc = alloc,
   };
}

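/* if building any pipeline fails, destroy the pipelines created so far,
 * clear their handles and return the error
 */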
VkResult
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   for (uint32_t i = 0; i < count; i++) {
      struct tu_pipeline_builder builder;
      tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                        &pCreateInfos[i], pAllocator);

      struct tu_pipeline *pipeline;
      VkResult result = tu_pipeline_builder_build(&builder, &pipeline);

      if (result != VK_SUCCESS) {
         for (uint32_t j = 0; j < i; j++) {
            tu_DestroyPipeline(device, pPipelines[j], pAllocator);
            pPipelines[j] = VK_NULL_HANDLE;
         }

         return result;
      }

      pPipelines[i] = tu_pipeline_to_handle(pipeline);
   }

   return VK_SUCCESS;
}

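/* compute pipelines are not implemented yet; this stub only reports success
 * without creating anything
 */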
static VkResult
tu_compute_pipeline_create(VkDevice _device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   return VK_SUCCESS;
}

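/* attempt to create every requested pipeline; entries that fail are set to
 * VK_NULL_HANDLE and the last error is returned once all entries have been
 * attempted
 */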
VkResult
tu_CreateComputePipelines(VkDevice _device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      VkResult r;
      r = tu_compute_pipeline_create(_device, pipelineCache, &pCreateInfos[i],
                                     pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;
      }
   }

   return result;
}

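/* release the pipeline's resources and free the object itself */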
void
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);
}