intel: Convert devinfo->urb.max_*_entries into an array.
[mesa.git] / src / intel / vulkan / genX_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 #include "genxml/gen_macros.h"
27 #include "genxml/genX_pack.h"
28
29 #include "common/gen_l3_config.h"
30 #include "common/gen_sample_positions.h"
31 #include "vk_format_info.h"
32
33 static uint32_t
34 vertex_element_comp_control(enum isl_format format, unsigned comp)
35 {
36 uint8_t bits;
37 switch (comp) {
38 case 0: bits = isl_format_layouts[format].channels.r.bits; break;
39 case 1: bits = isl_format_layouts[format].channels.g.bits; break;
40 case 2: bits = isl_format_layouts[format].channels.b.bits; break;
41 case 3: bits = isl_format_layouts[format].channels.a.bits; break;
42 default: unreachable("Invalid component");
43 }
44
45 if (bits) {
46 return VFCOMP_STORE_SRC;
47 } else if (comp < 3) {
48 return VFCOMP_STORE_0;
49 } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
50 isl_format_layouts[format].channels.r.type == ISL_SINT) {
51 assert(comp == 3);
52 return VFCOMP_STORE_1_INT;
53 } else {
54 assert(comp == 3);
55 return VFCOMP_STORE_1_FP;
56 }
57 }
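/* Illustrative example (not in the original source): for a format such as
 * ISL_FORMAT_R32G32B32_FLOAT the R/G/B channels have nonzero bit widths and
 * so return VFCOMP_STORE_SRC, while the missing alpha channel (component 3)
 * returns VFCOMP_STORE_1_FP because the R channel type is a float rather
 * than UINT/SINT.
 */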
58
59 static void
60 emit_vertex_input(struct anv_pipeline *pipeline,
61 const VkPipelineVertexInputStateCreateInfo *info)
62 {
63 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
64
65 /* Pull inputs_read out of the VS prog data */
66 const uint64_t inputs_read = vs_prog_data->inputs_read;
67 assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
68 const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
69
70 #if GEN_GEN >= 8
71 /* On BDW+, we only need to allocate space for base ids. Setting up
72 * the actual vertex and instance id is a separate packet.
73 */
74 const bool needs_svgs_elem = vs_prog_data->uses_basevertex ||
75 vs_prog_data->uses_baseinstance;
76 #else
77 /* On Haswell and prior, vertex and instance id are created by using the
78 * ComponentControl fields, so we need an element for any of them.
79 */
80 const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
81 vs_prog_data->uses_instanceid ||
82 vs_prog_data->uses_basevertex ||
83 vs_prog_data->uses_baseinstance;
84 #endif
85
86 uint32_t elem_count = __builtin_popcount(elements) + needs_svgs_elem;
87 if (elem_count == 0)
88 return;
89
90 uint32_t *p;
91
92 const uint32_t num_dwords = 1 + elem_count * 2;
93 p = anv_batch_emitn(&pipeline->batch, num_dwords,
94 GENX(3DSTATE_VERTEX_ELEMENTS));
95 memset(p + 1, 0, (num_dwords - 1) * 4);
96
97 for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
98 const VkVertexInputAttributeDescription *desc =
99 &info->pVertexAttributeDescriptions[i];
100 enum isl_format format = anv_get_isl_format(&pipeline->device->info,
101 desc->format,
102 VK_IMAGE_ASPECT_COLOR_BIT,
103 VK_IMAGE_TILING_LINEAR);
104
105 assert(desc->binding < 32);
106
107 if ((elements & (1 << desc->location)) == 0)
108 continue; /* Attribute location not used by the VS */
109
110 uint32_t slot = __builtin_popcount(elements & ((1 << desc->location) - 1));
111
112 struct GENX(VERTEX_ELEMENT_STATE) element = {
113 .VertexBufferIndex = desc->binding,
114 .Valid = true,
115 .SourceElementFormat = format,
116 .EdgeFlagEnable = false,
117 .SourceElementOffset = desc->offset,
118 .Component0Control = vertex_element_comp_control(format, 0),
119 .Component1Control = vertex_element_comp_control(format, 1),
120 .Component2Control = vertex_element_comp_control(format, 2),
121 .Component3Control = vertex_element_comp_control(format, 3),
122 };
123 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element);
124
125 #if GEN_GEN >= 8
126 /* On Broadwell and later, we have a separate VF_INSTANCING packet
127 * that controls instancing. On Haswell and prior, that's part of
128 * VERTEX_BUFFER_STATE which we emit later.
129 */
130 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
131 vfi.InstancingEnable = pipeline->instancing_enable[desc->binding];
132 vfi.VertexElementIndex = slot;
133 /* Vulkan so far doesn't have an instance divisor, so
134 * this is always 1 (ignored if not instancing). */
135 vfi.InstanceDataStepRate = 1;
136 }
137 #endif
138 }
139
140 const uint32_t id_slot = __builtin_popcount(elements);
141 if (needs_svgs_elem) {
142 /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
143 * "Within a VERTEX_ELEMENT_STATE structure, if a Component
144 * Control field is set to something other than VFCOMP_STORE_SRC,
145 * no higher-numbered Component Control fields may be set to
146 * VFCOMP_STORE_SRC"
147 *
148 * This means that if we have BaseInstance, we need BaseVertex as
149 * well. Just do all or nothing.
150 */
151 uint32_t base_ctrl = (vs_prog_data->uses_basevertex ||
152 vs_prog_data->uses_baseinstance) ?
153 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
154
155 struct GENX(VERTEX_ELEMENT_STATE) element = {
156 .VertexBufferIndex = 32, /* Reserved for this */
157 .Valid = true,
158 .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
159 .Component0Control = base_ctrl,
160 .Component1Control = base_ctrl,
161 #if GEN_GEN >= 8
162 .Component2Control = VFCOMP_STORE_0,
163 .Component3Control = VFCOMP_STORE_0,
164 #else
165 .Component2Control = VFCOMP_STORE_VID,
166 .Component3Control = VFCOMP_STORE_IID,
167 #endif
168 };
169 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element);
170 }
171
172 #if GEN_GEN >= 8
173 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
174 sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
175 sgvs.VertexIDComponentNumber = 2;
176 sgvs.VertexIDElementOffset = id_slot;
177 sgvs.InstanceIDEnable = vs_prog_data->uses_instanceid;
178 sgvs.InstanceIDComponentNumber = 3;
179 sgvs.InstanceIDElementOffset = id_slot;
180 }
181 #endif
182 }
183
184 void
185 genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
186 VkShaderStageFlags active_stages,
187 unsigned vs_size, unsigned gs_size,
188 const struct gen_l3_config *l3_config)
189 {
190 if (!(active_stages & VK_SHADER_STAGE_VERTEX_BIT))
191 vs_size = 1;
192
193 if (!(active_stages & VK_SHADER_STAGE_GEOMETRY_BIT))
194 gs_size = 1;
195
196 unsigned vs_entry_size_bytes = vs_size * 64;
197 unsigned gs_entry_size_bytes = gs_size * 64;
198
199 /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
200 *
201 * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
202 * Allocation Size is less than 9 512-bit URB entries.
203 *
204 * Similar text exists for GS.
205 */
206 unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
207 unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
208
209 /* URB allocations must be done in 8k chunks. */
210 unsigned chunk_size_bytes = 8192;
211
212 /* Determine the size of the URB in chunks. */
213 const unsigned total_urb_size =
214 gen_get_l3_config_urb_size(&device->info, l3_config);
215 const unsigned urb_chunks = total_urb_size * 1024 / chunk_size_bytes;
216
217 /* Reserve space for push constants */
218 unsigned push_constant_kb;
219 if (device->info.gen >= 8)
220 push_constant_kb = 32;
221 else if (device->info.is_haswell)
222 push_constant_kb = device->info.gt == 3 ? 32 : 16;
223 else
224 push_constant_kb = 16;
225
226 unsigned push_constant_bytes = push_constant_kb * 1024;
227 unsigned push_constant_chunks =
228 push_constant_bytes / chunk_size_bytes;
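/* For example, on Gen8+ this reserves 32 KB / 8 KB = 4 chunks of the URB
 * for push constants; on a non-Haswell Gen7 part it is 16 KB / 8 KB = 2
 * chunks.
 */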
229
230 /* Initially, assign each stage the minimum amount of URB space it needs,
231 * and make a note of how much additional space it "wants" (the amount of
232 * additional space it could actually make use of).
233 */
234
235 /* VS has a lower limit on the number of URB entries */
236 unsigned vs_chunks =
237 ALIGN(device->info.urb.min_vs_entries * vs_entry_size_bytes,
238 chunk_size_bytes) / chunk_size_bytes;
239 unsigned vs_wants =
240 ALIGN(device->info.urb.max_entries[MESA_SHADER_VERTEX] *
241 vs_entry_size_bytes,
242 chunk_size_bytes) / chunk_size_bytes - vs_chunks;
243
244 unsigned gs_chunks = 0;
245 unsigned gs_wants = 0;
246 if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) {
247 /* There are two constraints on the minimum amount of URB space we can
248 * allocate:
249 *
250 * (1) We need room for at least 2 URB entries, since we always operate
251 * the GS in DUAL_OBJECT mode.
252 *
253 * (2) We can't allocate less than gs_granularity.
254 */
255 gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
256 chunk_size_bytes) / chunk_size_bytes;
257 gs_wants =
258 ALIGN(device->info.urb.max_entries[MESA_SHADER_GEOMETRY] *
259 gs_entry_size_bytes,
260 chunk_size_bytes) / chunk_size_bytes - gs_chunks;
261 }
262
263 /* There should always be enough URB space to satisfy the minimum
264 * requirements of each stage.
265 */
266 unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
267 assert(total_needs <= urb_chunks);
268
269 /* Mete out remaining space (if any) in proportion to "wants". */
270 unsigned total_wants = vs_wants + gs_wants;
271 unsigned remaining_space = urb_chunks - total_needs;
272 if (remaining_space > total_wants)
273 remaining_space = total_wants;
274 if (remaining_space > 0) {
275 unsigned vs_additional = (unsigned)
276 round(vs_wants * (((double) remaining_space) / total_wants));
277 vs_chunks += vs_additional;
278 remaining_space -= vs_additional;
279 gs_chunks += remaining_space;
280 }
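/* Worked example with made-up numbers: if urb_chunks = 32,
 * push_constant_chunks = 4, vs_chunks = 8, gs_chunks = 4, vs_wants = 12 and
 * gs_wants = 4, then total_needs = 16 and remaining_space = 16 (already <=
 * total_wants). The VS gets round(12 * 16 / 16) = 12 extra chunks and the
 * GS the remaining 4, for a final split of 4 + 20 + 8 = 32 chunks.
 */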
281
282 /* Sanity check that we haven't over-allocated. */
283 assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
284
285 /* Finally, compute the number of entries that can fit in the space
286 * allocated to each stage.
287 */
288 unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
289 unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
290
291 /* Since we rounded up when computing *_wants, this may be slightly more
292 * than the maximum allowed amount, so correct for that.
293 */
294 nr_vs_entries = MIN2(nr_vs_entries,
295 device->info.urb.max_entries[MESA_SHADER_VERTEX]);
296 nr_gs_entries = MIN2(nr_gs_entries,
297 device->info.urb.max_entries[MESA_SHADER_GEOMETRY]);
298
299 /* Ensure that we program a multiple of the granularity. */
300 nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
301 nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
302
303 /* Finally, sanity check to make sure we have at least the minimum number
304 * of entries needed for each stage.
305 */
306 assert(nr_vs_entries >= device->info.urb.min_vs_entries);
307 if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
308 assert(nr_gs_entries >= 2);
309
310 #if GEN_GEN == 7 && !GEN_IS_HASWELL
311 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
312 *
313 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
314 * needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
315 * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
316 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL
317 * needs to be sent before any combination of VS associated 3DSTATE."
318 */
319 anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) {
320 pc.DepthStallEnable = true;
321 pc.PostSyncOperation = WriteImmediateData;
322 pc.Address = (struct anv_address) { &device->workaround_bo, 0 };
323 }
324 #endif
325
326 /* Lay out the URB in the following order:
327 * - push constants
328 * - VS
329 * - GS
330 */
331 anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
332 urb.VSURBStartingAddress = push_constant_chunks;
333 urb.VSURBEntryAllocationSize = vs_size - 1;
334 urb.VSNumberofURBEntries = nr_vs_entries;
335 }
336
337 anv_batch_emit(batch, GENX(3DSTATE_URB_HS), urb) {
338 urb.HSURBStartingAddress = push_constant_chunks;
339 }
340
341 anv_batch_emit(batch, GENX(3DSTATE_URB_DS), urb) {
342 urb.DSURBStartingAddress = push_constant_chunks;
343 }
344
345 anv_batch_emit(batch, GENX(3DSTATE_URB_GS), urb) {
346 urb.GSURBStartingAddress = push_constant_chunks + vs_chunks;
347 urb.GSURBEntryAllocationSize = gs_size - 1;
348 urb.GSNumberofURBEntries = nr_gs_entries;
349 }
350 }
351
352 static inline void
353 emit_urb_setup(struct anv_pipeline *pipeline)
354 {
355 unsigned vs_entry_size =
356 (pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT) ?
357 get_vs_prog_data(pipeline)->base.urb_entry_size : 0;
358 unsigned gs_entry_size =
359 (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) ?
360 get_gs_prog_data(pipeline)->base.urb_entry_size : 0;
361
362 genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
363 pipeline->active_stages, vs_entry_size, gs_entry_size,
364 pipeline->urb.l3_config);
365 }
366
367 static void
368 emit_3dstate_sbe(struct anv_pipeline *pipeline)
369 {
370 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
371 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
372 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
373 const struct brw_vue_map *fs_input_map;
374
375 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
376 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
377 #if GEN_GEN >= 8
378 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
379 #endif
380 return;
381 }
382
383 if (gs_prog_data)
384 fs_input_map = &gs_prog_data->base.vue_map;
385 else
386 fs_input_map = &vs_prog_data->base.vue_map;
387
388 struct GENX(3DSTATE_SBE) sbe = {
389 GENX(3DSTATE_SBE_header),
390 .AttributeSwizzleEnable = true,
391 .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
392 .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
393 .ConstantInterpolationEnable = wm_prog_data->flat_inputs,
394 };
395
396 #if GEN_GEN >= 9
397 for (unsigned i = 0; i < 32; i++)
398 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
399 #endif
400
401 #if GEN_GEN >= 8
402 /* On Broadwell, they broke 3DSTATE_SBE into two packets */
403 struct GENX(3DSTATE_SBE_SWIZ) swiz = {
404 GENX(3DSTATE_SBE_SWIZ_header),
405 };
406 #else
407 # define swiz sbe
408 #endif
409
410 int max_source_attr = 0;
411 for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
412 int input_index = wm_prog_data->urb_setup[attr];
413
414 if (input_index < 0)
415 continue;
416
417 if (attr == VARYING_SLOT_PNTC) {
418 sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
419 continue;
420 }
421
422 const int slot = fs_input_map->varying_to_slot[attr];
423
424 if (input_index >= 16)
425 continue;
426
427 if (slot == -1) {
428 /* This attribute does not exist in the VUE--that means that the
429 * vertex shader did not write to it. It could be that it's a
430 * regular varying read by the fragment shader but not written by
431 * the vertex shader or it's gl_PrimitiveID. In the first case the
432 * value is undefined, in the second it needs to be
433 * gl_PrimitiveID.
434 */
435 swiz.Attribute[input_index].ConstantSource = PRIM_ID;
436 swiz.Attribute[input_index].ComponentOverrideX = true;
437 swiz.Attribute[input_index].ComponentOverrideY = true;
438 swiz.Attribute[input_index].ComponentOverrideZ = true;
439 swiz.Attribute[input_index].ComponentOverrideW = true;
440 } else {
441 assert(slot >= 2);
442 const int source_attr = slot - 2;
443 max_source_attr = MAX2(max_source_attr, source_attr);
444 * We have to subtract two slots to account for the URB entry output
445 * read offset in the VS and GS stages.
446 */
447 swiz.Attribute[input_index].SourceAttribute = source_attr;
448 }
449 }
450
451 sbe.VertexURBEntryReadOffset = 1; /* Skip the VUE header and position slots */
452 sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
453
454 uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
455 GENX(3DSTATE_SBE_length));
456 GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
457
458 #if GEN_GEN >= 8
459 dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
460 GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
461 #endif
462 }
463
464 static const uint32_t vk_to_gen_cullmode[] = {
465 [VK_CULL_MODE_NONE] = CULLMODE_NONE,
466 [VK_CULL_MODE_FRONT_BIT] = CULLMODE_FRONT,
467 [VK_CULL_MODE_BACK_BIT] = CULLMODE_BACK,
468 [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
469 };
470
471 static const uint32_t vk_to_gen_fillmode[] = {
472 [VK_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
473 [VK_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
474 [VK_POLYGON_MODE_POINT] = FILL_MODE_POINT,
475 };
476
477 static const uint32_t vk_to_gen_front_face[] = {
478 [VK_FRONT_FACE_COUNTER_CLOCKWISE] = 1,
479 [VK_FRONT_FACE_CLOCKWISE] = 0
480 };
481
482 static void
483 emit_rs_state(struct anv_pipeline *pipeline,
484 const VkPipelineRasterizationStateCreateInfo *rs_info,
485 const VkPipelineMultisampleStateCreateInfo *ms_info,
486 const struct anv_render_pass *pass,
487 const struct anv_subpass *subpass)
488 {
489 struct GENX(3DSTATE_SF) sf = {
490 GENX(3DSTATE_SF_header),
491 };
492
493 sf.ViewportTransformEnable = true;
494 sf.StatisticsEnable = true;
495 sf.TriangleStripListProvokingVertexSelect = 0;
496 sf.LineStripListProvokingVertexSelect = 0;
497 sf.TriangleFanProvokingVertexSelect = 1;
498 sf.PointWidthSource = Vertex;
499 sf.PointWidth = 1.0;
500
501 #if GEN_GEN >= 8
502 struct GENX(3DSTATE_RASTER) raster = {
503 GENX(3DSTATE_RASTER_header),
504 };
505 #else
506 # define raster sf
507 #endif
508
509 /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
510 * "Multisample Modes State".
511 */
512 #if GEN_GEN >= 8
513 raster.DXMultisampleRasterizationEnable = true;
514 raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
515 raster.ForceMultisampling = false;
516 #else
517 raster.MultisampleRasterizationMode =
518 (ms_info && ms_info->rasterizationSamples > 1) ?
519 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
520 #endif
521
522 raster.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
523 raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
524 raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
525 raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
526 raster.ScissorRectangleEnable = true;
527
528 #if GEN_GEN >= 9
529 /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
530 raster.ViewportZFarClipTestEnable = !pipeline->depth_clamp_enable;
531 raster.ViewportZNearClipTestEnable = !pipeline->depth_clamp_enable;
532 #elif GEN_GEN >= 8
533 raster.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
534 #endif
535
536 raster.GlobalDepthOffsetEnableSolid = rs_info->depthBiasEnable;
537 raster.GlobalDepthOffsetEnableWireframe = rs_info->depthBiasEnable;
538 raster.GlobalDepthOffsetEnablePoint = rs_info->depthBiasEnable;
539
540 #if GEN_GEN == 7
541 /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it
542 * can get the depth offsets correct.
543 */
544 if (subpass->depth_stencil_attachment < pass->attachment_count) {
545 VkFormat vk_format =
546 pass->attachments[subpass->depth_stencil_attachment].format;
547 assert(vk_format_is_depth_or_stencil(vk_format));
548 if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
549 enum isl_format isl_format =
550 anv_get_isl_format(&pipeline->device->info, vk_format,
551 VK_IMAGE_ASPECT_DEPTH_BIT,
552 VK_IMAGE_TILING_OPTIMAL);
553 sf.DepthBufferSurfaceFormat =
554 isl_format_get_depth_format(isl_format, false);
555 }
556 }
557 #endif
558
559 #if GEN_GEN >= 8
560 GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);
561 GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
562 #else
563 # undef raster
564 GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf);
565 #endif
566 }
567
568 static void
569 emit_ms_state(struct anv_pipeline *pipeline,
570 const VkPipelineMultisampleStateCreateInfo *info)
571 {
572 uint32_t samples = 1;
573 uint32_t log2_samples = 0;
574
575 /* From the Vulkan 1.0 spec:
576 * If pSampleMask is NULL, it is treated as if the mask has all bits
577 * enabled, i.e. no coverage is removed from fragments.
578 *
579 * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
580 */
581 #if GEN_GEN >= 8
582 uint32_t sample_mask = 0xffff;
583 #else
584 uint32_t sample_mask = 0xff;
585 #endif
586
587 if (info) {
588 samples = info->rasterizationSamples;
589 log2_samples = __builtin_ffs(samples) - 1;
590 }
591
592 if (info && info->pSampleMask)
593 sample_mask &= info->pSampleMask[0];
594
595 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
596 ms.NumberofMultisamples = log2_samples;
597
598 #if GEN_GEN >= 8
599 /* The PRM says that this bit is valid only for DX9:
600 *
601 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
602 * should not have any effect by setting or not setting this bit.
603 */
604 ms.PixelPositionOffsetEnable = false;
605 ms.PixelLocation = CENTER;
606 #else
607 ms.PixelLocation = PIXLOC_CENTER;
608
609 switch (samples) {
610 case 1:
611 GEN_SAMPLE_POS_1X(ms.Sample);
612 break;
613 case 2:
614 GEN_SAMPLE_POS_2X(ms.Sample);
615 break;
616 case 4:
617 GEN_SAMPLE_POS_4X(ms.Sample);
618 break;
619 case 8:
620 GEN_SAMPLE_POS_8X(ms.Sample);
621 break;
622 default:
623 break;
624 }
625 #endif
626 }
627
628 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
629 sm.SampleMask = sample_mask;
630 }
631 }
632
633 static const uint32_t vk_to_gen_logic_op[] = {
634 [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
635 [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
636 [VK_LOGIC_OP_AND] = LOGICOP_AND,
637 [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
638 [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
639 [VK_LOGIC_OP_NO_OP] = LOGICOP_NOOP,
640 [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
641 [VK_LOGIC_OP_OR] = LOGICOP_OR,
642 [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
643 [VK_LOGIC_OP_EQUIVALENT] = LOGICOP_EQUIV,
644 [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
645 [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
646 [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
647 [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
648 [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
649 [VK_LOGIC_OP_SET] = LOGICOP_SET,
650 };
651
652 static const uint32_t vk_to_gen_blend[] = {
653 [VK_BLEND_FACTOR_ZERO] = BLENDFACTOR_ZERO,
654 [VK_BLEND_FACTOR_ONE] = BLENDFACTOR_ONE,
655 [VK_BLEND_FACTOR_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
656 [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
657 [VK_BLEND_FACTOR_DST_COLOR] = BLENDFACTOR_DST_COLOR,
658 [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
659 [VK_BLEND_FACTOR_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
660 [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
661 [VK_BLEND_FACTOR_DST_ALPHA] = BLENDFACTOR_DST_ALPHA,
662 [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
663 [VK_BLEND_FACTOR_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
664 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR,
665 [VK_BLEND_FACTOR_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
666 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA,
667 [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
668 [VK_BLEND_FACTOR_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
669 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
670 [VK_BLEND_FACTOR_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
671 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
672 };
673
674 static const uint32_t vk_to_gen_blend_op[] = {
675 [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
676 [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
677 [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
678 [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
679 [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
680 };
681
682 static const uint32_t vk_to_gen_compare_op[] = {
683 [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
684 [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
685 [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
686 [VK_COMPARE_OP_LESS_OR_EQUAL] = PREFILTEROPLEQUAL,
687 [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
688 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
689 [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROPGEQUAL,
690 [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
691 };
692
693 static const uint32_t vk_to_gen_stencil_op[] = {
694 [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP,
695 [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO,
696 [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE,
697 [VK_STENCIL_OP_INCREMENT_AND_CLAMP] = STENCILOP_INCRSAT,
698 [VK_STENCIL_OP_DECREMENT_AND_CLAMP] = STENCILOP_DECRSAT,
699 [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT,
700 [VK_STENCIL_OP_INCREMENT_AND_WRAP] = STENCILOP_INCR,
701 [VK_STENCIL_OP_DECREMENT_AND_WRAP] = STENCILOP_DECR,
702 };
703
704 static void
705 emit_ds_state(struct anv_pipeline *pipeline,
706 const VkPipelineDepthStencilStateCreateInfo *info,
707 const struct anv_render_pass *pass,
708 const struct anv_subpass *subpass)
709 {
710 #if GEN_GEN == 7
711 # define depth_stencil_dw pipeline->gen7.depth_stencil_state
712 #elif GEN_GEN == 8
713 # define depth_stencil_dw pipeline->gen8.wm_depth_stencil
714 #else
715 # define depth_stencil_dw pipeline->gen9.wm_depth_stencil
716 #endif
717
718 if (info == NULL) {
719 /* We're going to OR this together with the dynamic state. We need
720 * to make sure it's initialized to something useful.
721 */
722 memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
723 return;
724 }
725
726 /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */
727
728 #if GEN_GEN <= 7
729 struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
730 #else
731 struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = {
732 #endif
733 .DepthTestEnable = info->depthTestEnable,
734 .DepthBufferWriteEnable = info->depthWriteEnable,
735 .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
736 .DoubleSidedStencilEnable = true,
737
738 .StencilTestEnable = info->stencilTestEnable,
739 .StencilBufferWriteEnable = info->stencilTestEnable,
740 .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
741 .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
742 .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
743 .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
744 .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
745 .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
746 .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.depthFailOp],
747 .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
748 };
749
750 VkImageAspectFlags aspects = 0;
751 if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
752 VkFormat depth_stencil_format =
753 pass->attachments[subpass->depth_stencil_attachment].format;
754 aspects = vk_format_aspects(depth_stencil_format);
755 }
756
757 /* The Vulkan spec requires that if either depth or stencil is not present,
758 * the pipeline is to act as if the test silently passes.
759 */
760 if (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
761 depth_stencil.DepthBufferWriteEnable = false;
762 depth_stencil.DepthTestFunction = PREFILTEROPALWAYS;
763 }
764
765 if (!(aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
766 depth_stencil.StencilBufferWriteEnable = false;
767 depth_stencil.StencilTestFunction = PREFILTEROPALWAYS;
768 depth_stencil.BackfaceStencilTestFunction = PREFILTEROPALWAYS;
769 }
770
771 /* From the Broadwell PRM:
772 *
773 * "If Depth_Test_Enable = 1 AND Depth_Test_func = EQUAL, the
774 * Depth_Write_Enable must be set to 0."
775 */
776 if (info->depthTestEnable && info->depthCompareOp == VK_COMPARE_OP_EQUAL)
777 depth_stencil.DepthBufferWriteEnable = false;
778
779 #if GEN_GEN <= 7
780 GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
781 #else
782 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, depth_stencil_dw, &depth_stencil);
783 #endif
784 }
785
786 static void
787 emit_cb_state(struct anv_pipeline *pipeline,
788 const VkPipelineColorBlendStateCreateInfo *info,
789 const VkPipelineMultisampleStateCreateInfo *ms_info)
790 {
791 struct anv_device *device = pipeline->device;
792
793 const uint32_t num_dwords = GENX(BLEND_STATE_length);
794 pipeline->blend_state =
795 anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
796
797 struct GENX(BLEND_STATE) blend_state = {
798 #if GEN_GEN >= 8
799 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
800 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
801 #else
802 /* Make sure it gets zeroed */
803 .Entry = { { 0, }, },
804 #endif
805 };
806
807 /* Default everything to disabled */
808 for (uint32_t i = 0; i < 8; i++) {
809 blend_state.Entry[i].WriteDisableAlpha = true;
810 blend_state.Entry[i].WriteDisableRed = true;
811 blend_state.Entry[i].WriteDisableGreen = true;
812 blend_state.Entry[i].WriteDisableBlue = true;
813 }
814
815 uint32_t surface_count = 0;
816 struct anv_pipeline_bind_map *map;
817 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
818 map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
819 surface_count = map->surface_count;
820 }
821
822 bool has_writeable_rt = false;
823 for (unsigned i = 0; i < surface_count; i++) {
824 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
825
826 /* All color attachments are at the beginning of the binding table */
827 if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
828 break;
829
830 /* We can have at most 8 attachments */
831 assert(i < 8);
832
833 if (binding->index >= info->attachmentCount)
834 continue;
835
836 assert(binding->binding == 0);
837 const VkPipelineColorBlendAttachmentState *a =
838 &info->pAttachments[binding->index];
839
840 blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
841 #if GEN_GEN < 8
842 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
843 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
844 #endif
845 .LogicOpEnable = info->logicOpEnable,
846 .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
847 .ColorBufferBlendEnable = a->blendEnable,
848 .ColorClampRange = COLORCLAMP_RTFORMAT,
849 .PreBlendColorClampEnable = true,
850 .PostBlendColorClampEnable = true,
851 .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
852 .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
853 .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
854 .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
855 .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
856 .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
857 .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
858 .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
859 .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
860 .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
861 };
862
863 if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
864 a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
865 a->colorBlendOp != a->alphaBlendOp) {
866 #if GEN_GEN >= 8
867 blend_state.IndependentAlphaBlendEnable = true;
868 #else
869 blend_state.Entry[i].IndependentAlphaBlendEnable = true;
870 #endif
871 }
872
873 if (a->colorWriteMask != 0)
874 has_writeable_rt = true;
875
876 /* Our hardware applies the blend factor prior to the blend function
877 * regardless of what function is used. Technically, this means the
878 * hardware can do MORE than GL or Vulkan specify. However, it also
879 * means that, for MIN and MAX, we have to stomp the blend factor to
880 * ONE to make it a no-op.
881 */
882 if (a->colorBlendOp == VK_BLEND_OP_MIN ||
883 a->colorBlendOp == VK_BLEND_OP_MAX) {
884 blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
885 blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
886 }
887 if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
888 a->alphaBlendOp == VK_BLEND_OP_MAX) {
889 blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
890 blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
891 }
892 }
893
894 #if GEN_GEN >= 8
895 struct GENX(BLEND_STATE_ENTRY) *bs0 = &blend_state.Entry[0];
896 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
897 blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable;
898 blend.HasWriteableRT = has_writeable_rt;
899 blend.ColorBufferBlendEnable = bs0->ColorBufferBlendEnable;
900 blend.SourceAlphaBlendFactor = bs0->SourceAlphaBlendFactor;
901 blend.DestinationAlphaBlendFactor = bs0->DestinationAlphaBlendFactor;
902 blend.SourceBlendFactor = bs0->SourceBlendFactor;
903 blend.DestinationBlendFactor = bs0->DestinationBlendFactor;
904 blend.AlphaTestEnable = false;
905 blend.IndependentAlphaBlendEnable =
906 blend_state.IndependentAlphaBlendEnable;
907 }
908 #else
909 (void)has_writeable_rt;
910 #endif
911
912 GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
913 if (!device->info.has_llc)
914 anv_state_clflush(pipeline->blend_state);
915
916 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
917 bsp.BlendStatePointer = pipeline->blend_state.offset;
918 #if GEN_GEN >= 8
919 bsp.BlendStatePointerValid = true;
920 #endif
921 }
922 }
923
924 static void
925 emit_3dstate_clip(struct anv_pipeline *pipeline,
926 const VkPipelineViewportStateCreateInfo *vp_info,
927 const VkPipelineRasterizationStateCreateInfo *rs_info)
928 {
929 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
930 (void) wm_prog_data;
931 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
932 clip.ClipEnable = true;
933 clip.EarlyCullEnable = true;
934 clip.APIMode = APIMODE_D3D;
935 clip.ViewportXYClipTestEnable = true;
936
937 clip.ClipMode = CLIPMODE_NORMAL;
938
939 clip.TriangleStripListProvokingVertexSelect = 0;
940 clip.LineStripListProvokingVertexSelect = 0;
941 clip.TriangleFanProvokingVertexSelect = 1;
942
943 clip.MinimumPointWidth = 0.125;
944 clip.MaximumPointWidth = 255.875;
945 clip.MaximumVPIndex = (vp_info ? vp_info->viewportCount : 1) - 1;
946
947 #if GEN_GEN == 7
948 clip.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
949 clip.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
950 clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
951 #else
952 clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
953 (wm_prog_data->barycentric_interp_modes & 0x38) != 0 : 0;
954 #endif
955 }
956 }
957
958 static void
959 emit_3dstate_streamout(struct anv_pipeline *pipeline,
960 const VkPipelineRasterizationStateCreateInfo *rs_info)
961 {
962 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
963 so.RenderingDisable = rs_info->rasterizerDiscardEnable;
964 }
965 }
966
967 static inline uint32_t
968 get_sampler_count(const struct anv_shader_bin *bin)
969 {
970 return DIV_ROUND_UP(bin->bind_map.sampler_count, 4);
971 }
972
973 static inline uint32_t
974 get_binding_table_entry_count(const struct anv_shader_bin *bin)
975 {
976 return DIV_ROUND_UP(bin->bind_map.surface_count, 32);
977 }
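/* A small illustration of the two helpers above: SamplerCount is expressed
 * in groups of four samplers (e.g. 5 samplers -> DIV_ROUND_UP(5, 4) = 2) and
 * get_binding_table_entry_count likewise reports surfaces in groups of 32
 * (e.g. 40 surfaces -> 2).
 */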
978
979 static inline struct anv_address
980 get_scratch_address(struct anv_pipeline *pipeline,
981 gl_shader_stage stage,
982 const struct anv_shader_bin *bin)
983 {
984 return (struct anv_address) {
985 .bo = anv_scratch_pool_alloc(pipeline->device,
986 &pipeline->device->scratch_pool,
987 stage, bin->prog_data->total_scratch),
988 .offset = 0,
989 };
990 }
991
992 static inline uint32_t
993 get_scratch_space(const struct anv_shader_bin *bin)
994 {
995 return ffs(bin->prog_data->total_scratch / 2048);
996 }
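/* The encoding above is logarithmic: assuming total_scratch is zero or a
 * power of two of at least 2048 bytes, 2048 -> ffs(1) = 1, 4096 -> ffs(2) = 2,
 * 8192 -> ffs(4) = 3, and 0 -> ffs(0) = 0 (no scratch space).
 */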
997
998 static inline uint32_t
999 get_urb_output_offset()
1000 {
1001 /* Skip the VUE header and position slots */
1002 return 1;
1003 }
1004
1005 static inline uint32_t
1006 get_urb_output_length(const struct anv_shader_bin *bin)
1007 {
1008 const struct brw_vue_prog_data *prog_data =
1009 (const struct brw_vue_prog_data *)bin->prog_data;
1010
1011 return (prog_data->vue_map.num_slots + 1) / 2 - get_urb_output_offset();
1012 }
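/* For example, a VUE map with 9 slots occupies (9 + 1) / 2 = 5 pairs of
 * 128-bit slots; subtracting the 1-pair output read offset above gives an
 * output length of 4.
 */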
1013
1014 static void
1015 emit_3dstate_vs(struct anv_pipeline *pipeline)
1016 {
1017 const struct gen_device_info *devinfo = &pipeline->device->info;
1018 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1019 const struct anv_shader_bin *vs_bin =
1020 pipeline->shaders[MESA_SHADER_VERTEX];
1021
1022 assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
1023
1024 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
1025 vs.FunctionEnable = true;
1026 vs.StatisticsEnable = true;
1027 vs.KernelStartPointer = vs_bin->kernel.offset;
1028 #if GEN_GEN >= 8
1029 vs.SIMD8DispatchEnable =
1030 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
1031 #endif
1032
1033 assert(!vs_prog_data->base.base.use_alt_mode);
1034 vs.SingleVertexDispatch = false;
1035 vs.VectorMaskEnable = false;
1036 vs.SamplerCount = get_sampler_count(vs_bin);
1037 vs.BindingTableEntryCount = get_binding_table_entry_count(vs_bin);
1038 vs.FloatingPointMode = IEEE754;
1039 vs.IllegalOpcodeExceptionEnable = false;
1040 vs.SoftwareExceptionEnable = false;
1041 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
1042 vs.VertexCacheDisable = false;
1043
1044 vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
1045 vs.VertexURBEntryReadOffset = 0;
1046 vs.DispatchGRFStartRegisterForURBData =
1047 vs_prog_data->base.base.dispatch_grf_start_reg;
1048
1049 #if GEN_GEN >= 8
1050 vs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
1051 vs.VertexURBEntryOutputLength = get_urb_output_length(vs_bin);
1052
1053 /* TODO */
1054 vs.UserClipDistanceClipTestEnableBitmask = 0;
1055 vs.UserClipDistanceCullTestEnableBitmask = 0;
1056 #endif
1057
1058 vs.PerThreadScratchSpace = get_scratch_space(vs_bin);
1059 vs.ScratchSpaceBasePointer =
1060 get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
1061 }
1062 }
1063
1064 static void
1065 emit_3dstate_gs(struct anv_pipeline *pipeline)
1066 {
1067 const struct gen_device_info *devinfo = &pipeline->device->info;
1068 const struct anv_shader_bin *gs_bin =
1069 pipeline->shaders[MESA_SHADER_GEOMETRY];
1070
1071 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
1072 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
1073 return;
1074 }
1075
1076 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
1077
1078 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
1079 gs.FunctionEnable = true;
1080 gs.StatisticsEnable = true;
1081 gs.KernelStartPointer = gs_bin->kernel.offset;
1082 gs.DispatchMode = gs_prog_data->base.dispatch_mode;
1083
1084 gs.SingleProgramFlow = false;
1085 gs.VectorMaskEnable = false;
1086 gs.SamplerCount = get_sampler_count(gs_bin);
1087 gs.BindingTableEntryCount = get_binding_table_entry_count(gs_bin);
1088 gs.IncludeVertexHandles = gs_prog_data->base.include_vue_handles;
1089 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
1090
1091 if (GEN_GEN == 8) {
1092 /* Broadwell is weird. It needs us to divide by 2. */
1093 gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
1094 } else {
1095 gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
1096 }
1097
1098 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
1099 gs.OutputTopology = gs_prog_data->output_topology;
1100 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1101 gs.ControlDataFormat = gs_prog_data->control_data_format;
1102 gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
1103 gs.InstanceControl = MAX2(gs_prog_data->invocations, 1) - 1;
1104 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1105 gs.ReorderMode = TRAILING;
1106 #else
1107 gs.ReorderEnable = true;
1108 #endif
1109
1110 #if GEN_GEN >= 8
1111 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
1112 gs.StaticOutput = gs_prog_data->static_vertex_count >= 0;
1113 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ?
1114 gs_prog_data->static_vertex_count : 0;
1115 #endif
1116
1117 gs.VertexURBEntryReadOffset = 0;
1118 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1119 gs.DispatchGRFStartRegisterForURBData =
1120 gs_prog_data->base.base.dispatch_grf_start_reg;
1121
1122 #if GEN_GEN >= 8
1123 gs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
1124 gs.VertexURBEntryOutputLength = get_urb_output_length(gs_bin);
1125
1126 /* TODO */
1127 gs.UserClipDistanceClipTestEnableBitmask = 0;
1128 gs.UserClipDistanceCullTestEnableBitmask = 0;
1129 #endif
1130
1131 gs.PerThreadScratchSpace = get_scratch_space(gs_bin);
1132 gs.ScratchSpaceBasePointer =
1133 get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
1134 }
1135 }
1136
1137 static void
1138 emit_3dstate_wm(struct anv_pipeline *pipeline,
1139 const VkPipelineMultisampleStateCreateInfo *multisample)
1140 {
1141 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1142
1143 MAYBE_UNUSED uint32_t samples =
1144 multisample ? multisample->rasterizationSamples : 1;
1145
1146 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
1147 wm.StatisticsEnable = true;
1148 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1149 wm.LineAntialiasingRegionWidth = _10pixels;
1150 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1151
1152 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1153 if (wm_prog_data->early_fragment_tests) {
1154 wm.EarlyDepthStencilControl = EDSC_PREPS;
1155 } else if (wm_prog_data->has_side_effects) {
1156 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
1157 } else {
1158 wm.EarlyDepthStencilControl = EDSC_NORMAL;
1159 }
1160
1161 wm.BarycentricInterpolationMode =
1162 wm_prog_data->barycentric_interp_modes;
1163
1164 #if GEN_GEN < 8
1165 /* FIXME: This needs a lot more work, cf gen7 upload_wm_state(). */
1166 wm.ThreadDispatchEnable = true;
1167
1168 wm.PixelShaderKillsPixel = wm_prog_data->uses_kill;
1169 wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
1170 wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
1171 wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
1172 wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1173
1174 if (samples > 1) {
1175 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
1176 if (wm_prog_data->persample_dispatch) {
1177 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1178 } else {
1179 wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
1180 }
1181 } else {
1182 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
1183 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1184 }
1185 #endif
1186 }
1187 }
1188 }
1189
1190 static void
1191 emit_3dstate_ps(struct anv_pipeline *pipeline)
1192 {
1193 MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
1194 const struct anv_shader_bin *fs_bin =
1195 pipeline->shaders[MESA_SHADER_FRAGMENT];
1196
1197 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1198 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1199 #if GEN_GEN == 7
1200 /* Even if no fragments are ever dispatched, gen7 hardware hangs if
1201 * we don't at least set the maximum number of threads.
1202 */
1203 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
1204 #endif
1205 }
1206 return;
1207 }
1208
1209 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1210
1211 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1212 ps.KernelStartPointer0 = fs_bin->kernel.offset;
1213 ps.KernelStartPointer1 = 0;
1214 ps.KernelStartPointer2 = fs_bin->kernel.offset +
1215 wm_prog_data->prog_offset_2;
1216 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
1217 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
1218 ps._32PixelDispatchEnable = false;
1219
1220 ps.SingleProgramFlow = false;
1221 ps.VectorMaskEnable = true;
1222 ps.SamplerCount = get_sampler_count(fs_bin);
1223 ps.BindingTableEntryCount = get_binding_table_entry_count(fs_bin);
1224 ps.PushConstantEnable = wm_prog_data->base.nr_params > 0;
1225 ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
1226 POSOFFSET_SAMPLE : POSOFFSET_NONE;
1227 #if GEN_GEN < 8
1228 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
1229 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1230 ps.DualSourceBlendEnable = wm_prog_data->dual_src_blend;
1231 #endif
1232
1233 #if GEN_IS_HASWELL
1234 /* Haswell requires the sample mask to be set in this packet as well
1235 * as in 3DSTATE_SAMPLE_MASK; the values should match.
1236 */
1237 ps.SampleMask = 0xff;
1238 #endif
1239
1240 #if GEN_GEN >= 9
1241 ps.MaximumNumberofThreadsPerPSD = 64 - 1;
1242 #elif GEN_GEN >= 8
1243 ps.MaximumNumberofThreadsPerPSD = 64 - 2;
1244 #else
1245 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
1246 #endif
1247
1248 ps.DispatchGRFStartRegisterForConstantSetupData0 =
1249 wm_prog_data->base.dispatch_grf_start_reg;
1250 ps.DispatchGRFStartRegisterForConstantSetupData1 = 0;
1251 ps.DispatchGRFStartRegisterForConstantSetupData2 =
1252 wm_prog_data->dispatch_grf_start_reg_2;
1253
1254 ps.PerThreadScratchSpace = get_scratch_space(fs_bin);
1255 ps.ScratchSpaceBasePointer =
1256 get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
1257 }
1258 }
1259
1260 #if GEN_GEN >= 8
1261 static void
1262 emit_3dstate_ps_extra(struct anv_pipeline *pipeline)
1263 {
1264 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1265
1266 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1267 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
1268 return;
1269 }
1270
1271 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
1272 ps.PixelShaderValid = true;
1273 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
1274 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1275 ps.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
1276 ps.PixelShaderKillsPixel = wm_prog_data->uses_kill;
1277 ps.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
1278 ps.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
1279 ps.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
1280
1281 #if GEN_GEN >= 9
1282 ps.PixelShaderPullsBary = wm_prog_data->pulls_bary;
1283 ps.InputCoverageMaskState = wm_prog_data->uses_sample_mask ?
1284 ICMS_INNER_CONSERVATIVE : ICMS_NONE;
1285 #else
1286 ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1287 #endif
1288 }
1289 }
1290
1291 static void
1292 emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
1293 {
1294 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
1295 vft.PrimitiveTopologyType = pipeline->topology;
1296 }
1297 }
1298 #endif
1299
1300 static VkResult
1301 genX(graphics_pipeline_create)(
1302 VkDevice _device,
1303 struct anv_pipeline_cache * cache,
1304 const VkGraphicsPipelineCreateInfo* pCreateInfo,
1305 const VkAllocationCallbacks* pAllocator,
1306 VkPipeline* pPipeline)
1307 {
1308 ANV_FROM_HANDLE(anv_device, device, _device);
1309 ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
1310 struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
1311 struct anv_pipeline *pipeline;
1312 VkResult result;
1313
1314 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1315
1316 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1317 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1318 if (pipeline == NULL)
1319 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1320
1321 result = anv_pipeline_init(pipeline, device, cache,
1322 pCreateInfo, pAllocator);
1323 if (result != VK_SUCCESS) {
1324 vk_free2(&device->alloc, pAllocator, pipeline);
1325 return result;
1326 }
1327
1328 assert(pCreateInfo->pVertexInputState);
1329 emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
1330 assert(pCreateInfo->pRasterizationState);
1331 emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
1332 pCreateInfo->pMultisampleState, pass, subpass);
1333 emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
1334 emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
1335 emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
1336 pCreateInfo->pMultisampleState);
1337
1338 emit_urb_setup(pipeline);
1339
1340 emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
1341 pCreateInfo->pRasterizationState);
1342 emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
1343
1344 #if 0
1345 /* From gen7_vs_state.c */
1346
1347 /**
1348 * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
1349 * Geometry > Geometry Shader > State:
1350 *
1351 * "Note: Because of corruption in IVB:GT2, software needs to flush the
1352 * whole fixed function pipeline when the GS enable changes value in
1353 * the 3DSTATE_GS."
1354 *
1355 * The hardware architects have clarified that in this context "flush the
1356 * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
1357 * Stall" bit set.
1358 */
1359 if (!brw->is_haswell && !brw->is_baytrail)
1360 gen7_emit_vs_workaround_flush(brw);
1361 #endif
1362
1363 emit_3dstate_vs(pipeline);
1364 emit_3dstate_gs(pipeline);
1365 emit_3dstate_sbe(pipeline);
1366 emit_3dstate_wm(pipeline, pCreateInfo->pMultisampleState);
1367 emit_3dstate_ps(pipeline);
1368 #if GEN_GEN >= 8
1369 emit_3dstate_ps_extra(pipeline);
1370 emit_3dstate_vf_topology(pipeline);
1371 #endif
1372
1373 *pPipeline = anv_pipeline_to_handle(pipeline);
1374
1375 return VK_SUCCESS;
1376 }
1377
1378 static VkResult
1379 compute_pipeline_create(
1380 VkDevice _device,
1381 struct anv_pipeline_cache * cache,
1382 const VkComputePipelineCreateInfo* pCreateInfo,
1383 const VkAllocationCallbacks* pAllocator,
1384 VkPipeline* pPipeline)
1385 {
1386 ANV_FROM_HANDLE(anv_device, device, _device);
1387 const struct anv_physical_device *physical_device =
1388 &device->instance->physicalDevice;
1389 const struct gen_device_info *devinfo = &physical_device->info;
1390 struct anv_pipeline *pipeline;
1391 VkResult result;
1392
1393 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
1394
1395 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1396 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1397 if (pipeline == NULL)
1398 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1399
1400 pipeline->device = device;
1401 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
1402
1403 pipeline->blend_state.map = NULL;
1404
1405 result = anv_reloc_list_init(&pipeline->batch_relocs,
1406 pAllocator ? pAllocator : &device->alloc);
1407 if (result != VK_SUCCESS) {
1408 vk_free2(&device->alloc, pAllocator, pipeline);
1409 return result;
1410 }
1411 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1412 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1413 pipeline->batch.relocs = &pipeline->batch_relocs;
1414
1415 /* When we free the pipeline, we detect stages based on the NULL status
1416 * of various prog_data pointers. Make them NULL by default.
1417 */
1418 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1419
1420 pipeline->active_stages = 0;
1421
1422 pipeline->needs_data_cache = false;
1423
1424 assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
1425 ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
1426 result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
1427 pCreateInfo->stage.pName,
1428 pCreateInfo->stage.pSpecializationInfo);
1429 if (result != VK_SUCCESS) {
1430 vk_free2(&device->alloc, pAllocator, pipeline);
1431 return result;
1432 }
1433
1434 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1435
1436 anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
1437
1438 uint32_t group_size = cs_prog_data->local_size[0] *
1439 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
1440 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
1441
1442 if (remainder > 0)
1443 pipeline->cs_right_mask = ~0u >> (32 - remainder);
1444 else
1445 pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);
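/* Illustrative example: a 7x1x1 local workgroup compiled at SIMD8 gives
 * group_size = 7 and remainder = 7, so cs_right_mask = 0x7f (the unused
 * eighth channel of the last SIMD group is masked off); a 16x1x1 group at
 * SIMD8 has remainder 0 and uses the full 0xff mask.
 */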
1446
1447 const uint32_t vfe_curbe_allocation =
1448 ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
1449 cs_prog_data->push.cross_thread.regs, 2);
1450
1451 const uint32_t subslices = MAX2(physical_device->subslice_total, 1);
1452
1453 const struct anv_shader_bin *cs_bin =
1454 pipeline->shaders[MESA_SHADER_COMPUTE];
1455
1456 anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
1457 #if GEN_GEN > 7
1458 vfe.StackSize = 0;
1459 #else
1460 vfe.GPGPUMode = true;
1461 #endif
1462 vfe.MaximumNumberofThreads =
1463 devinfo->max_cs_threads * subslices - 1;
1464 vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2;
1465 vfe.ResetGatewayTimer = true;
1466 #if GEN_GEN <= 8
1467 vfe.BypassGatewayControl = true;
1468 #endif
1469 vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2;
1470 vfe.CURBEAllocationSize = vfe_curbe_allocation;
1471
1472 vfe.PerThreadScratchSpace = get_scratch_space(cs_bin);
1473 vfe.ScratchSpaceBasePointer =
1474 get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
1475 }
1476
1477 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
1478 .KernelStartPointer = cs_bin->kernel.offset,
1479
1480 .SamplerCount = get_sampler_count(cs_bin),
1481 .BindingTableEntryCount = get_binding_table_entry_count(cs_bin),
1482 .BarrierEnable = cs_prog_data->uses_barrier,
1483 .SharedLocalMemorySize =
1484 encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared),
1485
1486 #if !GEN_IS_HASWELL
1487 .ConstantURBEntryReadOffset = 0,
1488 #endif
1489 .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
1490 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1491 .CrossThreadConstantDataReadLength =
1492 cs_prog_data->push.cross_thread.regs,
1493 #endif
1494
1495 .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
1496 };
1497 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL,
1498 pipeline->interface_descriptor_data,
1499 &desc);
1500
1501 *pPipeline = anv_pipeline_to_handle(pipeline);
1502
1503 return VK_SUCCESS;
1504 }
1505
1506 VkResult genX(CreateGraphicsPipelines)(
1507 VkDevice _device,
1508 VkPipelineCache pipelineCache,
1509 uint32_t count,
1510 const VkGraphicsPipelineCreateInfo* pCreateInfos,
1511 const VkAllocationCallbacks* pAllocator,
1512 VkPipeline* pPipelines)
1513 {
1514 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1515
1516 VkResult result = VK_SUCCESS;
1517
1518 unsigned i = 0;
1519 for (; i < count; i++) {
1520 result = genX(graphics_pipeline_create)(_device,
1521 pipeline_cache,
1522 &pCreateInfos[i],
1523 pAllocator, &pPipelines[i]);
1524 if (result != VK_SUCCESS) {
1525 for (unsigned j = 0; j < i; j++) {
1526 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1527 }
1528
1529 return result;
1530 }
1531 }
1532
1533 return VK_SUCCESS;
1534 }
1535
1536 VkResult genX(CreateComputePipelines)(
1537 VkDevice _device,
1538 VkPipelineCache pipelineCache,
1539 uint32_t count,
1540 const VkComputePipelineCreateInfo* pCreateInfos,
1541 const VkAllocationCallbacks* pAllocator,
1542 VkPipeline* pPipelines)
1543 {
1544 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1545
1546 VkResult result = VK_SUCCESS;
1547
1548 unsigned i = 0;
1549 for (; i < count; i++) {
1550 result = compute_pipeline_create(_device, pipeline_cache,
1551 &pCreateInfos[i],
1552 pAllocator, &pPipelines[i]);
1553 if (result != VK_SUCCESS) {
1554 for (unsigned j = 0; j < i; j++) {
1555 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1556 }
1557
1558 return result;
1559 }
1560 }
1561
1562 return VK_SUCCESS;
1563 }