anv/pipeline: Use get_scratch_space/address for compute shaders
[mesa.git] src/intel/vulkan/genX_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 #include "genxml/gen_macros.h"
27 #include "genxml/genX_pack.h"
28
29 #include "common/gen_l3_config.h"
30 #include "common/gen_sample_positions.h"
31 #include "vk_format_info.h"
32
33 static uint32_t
34 vertex_element_comp_control(enum isl_format format, unsigned comp)
35 {
36 uint8_t bits;
37 switch (comp) {
38 case 0: bits = isl_format_layouts[format].channels.r.bits; break;
39 case 1: bits = isl_format_layouts[format].channels.g.bits; break;
40 case 2: bits = isl_format_layouts[format].channels.b.bits; break;
41 case 3: bits = isl_format_layouts[format].channels.a.bits; break;
42 default: unreachable("Invalid component");
43 }
44
45 if (bits) {
46 return VFCOMP_STORE_SRC;
47 } else if (comp < 3) {
48 return VFCOMP_STORE_0;
49 } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
50 isl_format_layouts[format].channels.r.type == ISL_SINT) {
51 assert(comp == 3);
52 return VFCOMP_STORE_1_INT;
53 } else {
54 assert(comp == 3);
55 return VFCOMP_STORE_1_FP;
56 }
57 }
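/* Illustrative examples of the mapping above (not exhaustive): for a
 * three-channel float format such as R32G32B32_FLOAT, components 0-2 have
 * nonzero bits and get VFCOMP_STORE_SRC, while component 3 has zero bits
 * and, since the R channel is a float type, gets VFCOMP_STORE_1_FP (w is
 * filled with 1.0f). For a two-channel integer format such as R32G32_SINT,
 * components 0-1 get VFCOMP_STORE_SRC, component 2 gets VFCOMP_STORE_0,
 * and component 3 gets VFCOMP_STORE_1_INT.
 */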
58
59 static void
60 emit_vertex_input(struct anv_pipeline *pipeline,
61 const VkPipelineVertexInputStateCreateInfo *info)
62 {
63 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
64
65 /* Pull inputs_read out of the VS prog data */
66 const uint64_t inputs_read = vs_prog_data->inputs_read;
67 assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
68 const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
69
70 #if GEN_GEN >= 8
71 /* On BDW+, we only need to allocate space for base ids. Setting up
72 * the actual vertex and instance id is a separate packet.
73 */
74 const bool needs_svgs_elem = vs_prog_data->uses_basevertex ||
75 vs_prog_data->uses_baseinstance;
76 #else
77 /* On Haswell and prior, vertex and instance id are created by using the
78 * ComponentControl fields, so we need an element for any of them.
79 */
80 const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
81 vs_prog_data->uses_instanceid ||
82 vs_prog_data->uses_basevertex ||
83 vs_prog_data->uses_baseinstance;
84 #endif
85
86 uint32_t elem_count = __builtin_popcount(elements) + needs_svgs_elem;
87 if (elem_count == 0)
88 return;
89
90 uint32_t *p;
91
92 const uint32_t num_dwords = 1 + elem_count * 2;
93 p = anv_batch_emitn(&pipeline->batch, num_dwords,
94 GENX(3DSTATE_VERTEX_ELEMENTS));
95 memset(p + 1, 0, (num_dwords - 1) * 4);
96
97 for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
98 const VkVertexInputAttributeDescription *desc =
99 &info->pVertexAttributeDescriptions[i];
100 enum isl_format format = anv_get_isl_format(&pipeline->device->info,
101 desc->format,
102 VK_IMAGE_ASPECT_COLOR_BIT,
103 VK_IMAGE_TILING_LINEAR);
104
105 assert(desc->binding < 32);
106
107 if ((elements & (1 << desc->location)) == 0)
108 continue; /* Binding unused */
109
110 uint32_t slot = __builtin_popcount(elements & ((1 << desc->location) - 1));
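/* Worked example (illustrative values): if the VS reads generic
 * attributes 0, 1 and 3, then elements == 0xb. For desc->location == 3,
 * elements & ((1 << 3) - 1) == 0x3, so __builtin_popcount() yields
 * slot == 2, i.e. this attribute packs into the third
 * VERTEX_ELEMENT_STATE entry, right after locations 0 and 1.
 */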
111
112 struct GENX(VERTEX_ELEMENT_STATE) element = {
113 .VertexBufferIndex = desc->binding,
114 .Valid = true,
115 .SourceElementFormat = format,
116 .EdgeFlagEnable = false,
117 .SourceElementOffset = desc->offset,
118 .Component0Control = vertex_element_comp_control(format, 0),
119 .Component1Control = vertex_element_comp_control(format, 1),
120 .Component2Control = vertex_element_comp_control(format, 2),
121 .Component3Control = vertex_element_comp_control(format, 3),
122 };
123 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element);
124
125 #if GEN_GEN >= 8
126 /* On Broadwell and later, we have a separate VF_INSTANCING packet
127 * that controls instancing. On Haswell and prior, that's part of
128 * VERTEX_BUFFER_STATE which we emit later.
129 */
130 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
131 vfi.InstancingEnable = pipeline->instancing_enable[desc->binding];
132 vfi.VertexElementIndex = slot;
133 /* Vulkan so far doesn't have an instance divisor, so
134 * this is always 1 (ignored if not instancing). */
135 vfi.InstanceDataStepRate = 1;
136 }
137 #endif
138 }
139
140 const uint32_t id_slot = __builtin_popcount(elements);
141 if (needs_svgs_elem) {
142 /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
143 * "Within a VERTEX_ELEMENT_STATE structure, if a Component
144 * Control field is set to something other than VFCOMP_STORE_SRC,
145 * no higher-numbered Component Control fields may be set to
146 * VFCOMP_STORE_SRC"
147 *
148 * This means that if we have BaseInstance, we need BaseVertex as
149 * well. Just do all or nothing.
150 */
151 uint32_t base_ctrl = (vs_prog_data->uses_basevertex ||
152 vs_prog_data->uses_baseinstance) ?
153 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
154
155 struct GENX(VERTEX_ELEMENT_STATE) element = {
156 .VertexBufferIndex = 32, /* Reserved for this */
157 .Valid = true,
158 .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
159 .Component0Control = base_ctrl,
160 .Component1Control = base_ctrl,
161 #if GEN_GEN >= 8
162 .Component2Control = VFCOMP_STORE_0,
163 .Component3Control = VFCOMP_STORE_0,
164 #else
165 .Component2Control = VFCOMP_STORE_VID,
166 .Component3Control = VFCOMP_STORE_IID,
167 #endif
168 };
169 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element);
170 }
171
172 #if GEN_GEN >= 8
173 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
174 sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
175 sgvs.VertexIDComponentNumber = 2;
176 sgvs.VertexIDElementOffset = id_slot;
177 sgvs.InstanceIDEnable = vs_prog_data->uses_instanceid;
178 sgvs.InstanceIDComponentNumber = 3;
179 sgvs.InstanceIDElementOffset = id_slot;
180 }
181 #endif
182 }
183
184 void
185 genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
186 VkShaderStageFlags active_stages,
187 unsigned vs_size, unsigned gs_size,
188 const struct gen_l3_config *l3_config)
189 {
190 if (!(active_stages & VK_SHADER_STAGE_VERTEX_BIT))
191 vs_size = 1;
192
193 if (!(active_stages & VK_SHADER_STAGE_GEOMETRY_BIT))
194 gs_size = 1;
195
196 unsigned vs_entry_size_bytes = vs_size * 64;
197 unsigned gs_entry_size_bytes = gs_size * 64;
198
199 /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
200 *
201 * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
202 * Allocation Size is less than 9 512-bit URB entries.
203 *
204 * Similar text exists for GS.
205 */
206 unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
207 unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
208
209 /* URB allocations must be done in 8k chunks. */
210 unsigned chunk_size_bytes = 8192;
211
212 /* Determine the size of the URB in chunks. */
213 const unsigned total_urb_size =
214 gen_get_l3_config_urb_size(&device->info, l3_config);
215 const unsigned urb_chunks = total_urb_size * 1024 / chunk_size_bytes;
216
217 /* Reserve space for push constants */
218 unsigned push_constant_kb;
219 if (device->info.gen >= 8)
220 push_constant_kb = 32;
221 else if (device->info.is_haswell)
222 push_constant_kb = device->info.gt == 3 ? 32 : 16;
223 else
224 push_constant_kb = 16;
225
226 unsigned push_constant_bytes = push_constant_kb * 1024;
227 unsigned push_constant_chunks =
228 push_constant_bytes / chunk_size_bytes;
229
230 /* Initially, assign each stage the minimum amount of URB space it needs,
231 * and make a note of how much additional space it "wants" (the amount of
232 * additional space it could actually make use of).
233 */
234
235 /* VS has a lower limit on the number of URB entries */
236 unsigned vs_chunks =
237 ALIGN(device->info.urb.min_vs_entries * vs_entry_size_bytes,
238 chunk_size_bytes) / chunk_size_bytes;
239 unsigned vs_wants =
240 ALIGN(device->info.urb.max_vs_entries * vs_entry_size_bytes,
241 chunk_size_bytes) / chunk_size_bytes - vs_chunks;
242
243 unsigned gs_chunks = 0;
244 unsigned gs_wants = 0;
245 if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) {
246 /* There are two constraints on the minimum amount of URB space we can
247 * allocate:
248 *
249 * (1) We need room for at least 2 URB entries, since we always operate
250 * the GS in DUAL_OBJECT mode.
251 *
252 * (2) We can't allocate fewer entries than the GS entry granularity (gs_granularity).
253 */
254 gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
255 chunk_size_bytes) / chunk_size_bytes;
256 gs_wants =
257 ALIGN(device->info.urb.max_gs_entries * gs_entry_size_bytes,
258 chunk_size_bytes) / chunk_size_bytes - gs_chunks;
259 }
260
261 /* There should always be enough URB space to satisfy the minimum
262 * requirements of each stage.
263 */
264 unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
265 assert(total_needs <= urb_chunks);
266
267 /* Mete out remaining space (if any) in proportion to "wants". */
268 unsigned total_wants = vs_wants + gs_wants;
269 unsigned remaining_space = urb_chunks - total_needs;
270 if (remaining_space > total_wants)
271 remaining_space = total_wants;
272 if (remaining_space > 0) {
273 unsigned vs_additional = (unsigned)
274 round(vs_wants * (((double) remaining_space) / total_wants));
275 vs_chunks += vs_additional;
276 remaining_space -= vs_additional;
277 gs_chunks += remaining_space;
278 }
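/* Worked example with made-up numbers: a 256 KB URB gives urb_chunks == 32
 * (8 KB chunks). Suppose push_constant_chunks == 4, vs_chunks == 8 and
 * gs_chunks == 4, so total_needs == 16 and remaining_space == 16, and
 * suppose vs_wants == 12 and gs_wants == 4 (total_wants == 16, so no
 * clamping). Then vs_additional == round(12 * 16 / 16) == 12, the VS ends
 * up with 20 chunks, the GS gets the remaining 8, and 4 + 20 + 8 == 32
 * chunks are accounted for.
 */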
279
280 /* Sanity check that we haven't over-allocated. */
281 assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
282
283 /* Finally, compute the number of entries that can fit in the space
284 * allocated to each stage.
285 */
286 unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
287 unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
288
289 /* Since we rounded up when computing *_wants, this may be slightly more
290 * than the maximum allowed amount, so correct for that.
291 */
292 nr_vs_entries = MIN2(nr_vs_entries, device->info.urb.max_vs_entries);
293 nr_gs_entries = MIN2(nr_gs_entries, device->info.urb.max_gs_entries);
294
295 /* Ensure that we program a multiple of the granularity. */
296 nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
297 nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
298
299 /* Finally, sanity check to make sure we have at least the minimum number
300 * of entries needed for each stage.
301 */
302 assert(nr_vs_entries >= device->info.urb.min_vs_entries);
303 if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
304 assert(nr_gs_entries >= 2);
305
306 #if GEN_GEN == 7 && !GEN_IS_HASWELL
307 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
308 *
309 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
310 * needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
311 * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
312 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL
313 * needs to be sent before any combination of VS associated 3DSTATE."
314 */
315 anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) {
316 pc.DepthStallEnable = true;
317 pc.PostSyncOperation = WriteImmediateData;
318 pc.Address = (struct anv_address) { &device->workaround_bo, 0 };
319 }
320 #endif
321
322 /* Lay out the URB in the following order:
323 * - push constants
324 * - VS
325 * - GS
326 */
327 anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
328 urb.VSURBStartingAddress = push_constant_chunks;
329 urb.VSURBEntryAllocationSize = vs_size - 1;
330 urb.VSNumberofURBEntries = nr_vs_entries;
331 }
332
333 anv_batch_emit(batch, GENX(3DSTATE_URB_HS), urb) {
334 urb.HSURBStartingAddress = push_constant_chunks;
335 }
336
337 anv_batch_emit(batch, GENX(3DSTATE_URB_DS), urb) {
338 urb.DSURBStartingAddress = push_constant_chunks;
339 }
340
341 anv_batch_emit(batch, GENX(3DSTATE_URB_GS), urb) {
342 urb.GSURBStartingAddress = push_constant_chunks + vs_chunks;
343 urb.GSURBEntryAllocationSize = gs_size - 1;
344 urb.GSNumberofURBEntries = nr_gs_entries;
345 }
346 }
347
348 static inline void
349 emit_urb_setup(struct anv_pipeline *pipeline)
350 {
351 unsigned vs_entry_size =
352 (pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT) ?
353 get_vs_prog_data(pipeline)->base.urb_entry_size : 0;
354 unsigned gs_entry_size =
355 (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) ?
356 get_gs_prog_data(pipeline)->base.urb_entry_size : 0;
357
358 genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
359 pipeline->active_stages, vs_entry_size, gs_entry_size,
360 pipeline->urb.l3_config);
361 }
362
363 static void
364 emit_3dstate_sbe(struct anv_pipeline *pipeline)
365 {
366 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
367 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
368 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
369 const struct brw_vue_map *fs_input_map;
370
371 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
372 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
373 #if GEN_GEN >= 8
374 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
375 #endif
376 return;
377 }
378
379 if (gs_prog_data)
380 fs_input_map = &gs_prog_data->base.vue_map;
381 else
382 fs_input_map = &vs_prog_data->base.vue_map;
383
384 struct GENX(3DSTATE_SBE) sbe = {
385 GENX(3DSTATE_SBE_header),
386 .AttributeSwizzleEnable = true,
387 .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
388 .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
389 .ConstantInterpolationEnable = wm_prog_data->flat_inputs,
390 };
391
392 #if GEN_GEN >= 9
393 for (unsigned i = 0; i < 32; i++)
394 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
395 #endif
396
397 #if GEN_GEN >= 8
398 /* On Broadwell, they broke 3DSTATE_SBE into two packets */
399 struct GENX(3DSTATE_SBE_SWIZ) swiz = {
400 GENX(3DSTATE_SBE_SWIZ_header),
401 };
402 #else
403 # define swiz sbe
404 #endif
405
406 int max_source_attr = 0;
407 for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
408 int input_index = wm_prog_data->urb_setup[attr];
409
410 if (input_index < 0)
411 continue;
412
413 if (attr == VARYING_SLOT_PNTC) {
414 sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
415 continue;
416 }
417
418 const int slot = fs_input_map->varying_to_slot[attr];
419
420 if (input_index >= 16)
421 continue;
422
423 if (slot == -1) {
424 /* This attribute does not exist in the VUE--that means that the
425 * vertex shader did not write to it. It could be that it's a
426 * regular varying read by the fragment shader but not written by
427 * the vertex shader, or it's gl_PrimitiveID. In the first case the
428 * value is undefined, in the second it needs to be
429 * gl_PrimitiveID.
430 */
431 swiz.Attribute[input_index].ConstantSource = PRIM_ID;
432 swiz.Attribute[input_index].ComponentOverrideX = true;
433 swiz.Attribute[input_index].ComponentOverrideY = true;
434 swiz.Attribute[input_index].ComponentOverrideZ = true;
435 swiz.Attribute[input_index].ComponentOverrideW = true;
436 } else {
437 assert(slot >= 2);
438 const int source_attr = slot - 2;
439 max_source_attr = MAX2(max_source_attr, source_attr);
440 /* We have to subtract two slots to account for the URB entry output
441 * read offset in the VS and GS stages.
442 */
443 swiz.Attribute[input_index].SourceAttribute = source_attr;
444 }
445 }
446
447 sbe.VertexURBEntryReadOffset = 1; /* Skip the VUE header and position slots */
448 sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
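/* For example, with max_source_attr == 4 the read length is
 * DIV_ROUND_UP(5, 2) == 3, i.e. three two-attribute (256-bit) groups are
 * read per URB entry, starting after the header/position pair skipped by
 * the read offset of 1 above.
 */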
449
450 uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
451 GENX(3DSTATE_SBE_length));
452 GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
453
454 #if GEN_GEN >= 8
455 dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
456 GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
457 #endif
458 }
459
460 static const uint32_t vk_to_gen_cullmode[] = {
461 [VK_CULL_MODE_NONE] = CULLMODE_NONE,
462 [VK_CULL_MODE_FRONT_BIT] = CULLMODE_FRONT,
463 [VK_CULL_MODE_BACK_BIT] = CULLMODE_BACK,
464 [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
465 };
466
467 static const uint32_t vk_to_gen_fillmode[] = {
468 [VK_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
469 [VK_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
470 [VK_POLYGON_MODE_POINT] = FILL_MODE_POINT,
471 };
472
473 static const uint32_t vk_to_gen_front_face[] = {
474 [VK_FRONT_FACE_COUNTER_CLOCKWISE] = 1,
475 [VK_FRONT_FACE_CLOCKWISE] = 0
476 };
477
478 static void
479 emit_rs_state(struct anv_pipeline *pipeline,
480 const VkPipelineRasterizationStateCreateInfo *rs_info,
481 const VkPipelineMultisampleStateCreateInfo *ms_info,
482 const struct anv_render_pass *pass,
483 const struct anv_subpass *subpass)
484 {
485 struct GENX(3DSTATE_SF) sf = {
486 GENX(3DSTATE_SF_header),
487 };
488
489 sf.ViewportTransformEnable = true;
490 sf.StatisticsEnable = true;
491 sf.TriangleStripListProvokingVertexSelect = 0;
492 sf.LineStripListProvokingVertexSelect = 0;
493 sf.TriangleFanProvokingVertexSelect = 1;
494 sf.PointWidthSource = Vertex;
495 sf.PointWidth = 1.0;
496
497 #if GEN_GEN >= 8
498 struct GENX(3DSTATE_RASTER) raster = {
499 GENX(3DSTATE_RASTER_header),
500 };
501 #else
502 # define raster sf
503 #endif
504
505 /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
506 * "Multisample Modes State".
507 */
508 #if GEN_GEN >= 8
509 raster.DXMultisampleRasterizationEnable = true;
510 raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
511 raster.ForceMultisampling = false;
512 #else
513 raster.MultisampleRasterizationMode =
514 (ms_info && ms_info->rasterizationSamples > 1) ?
515 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
516 #endif
517
518 raster.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
519 raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
520 raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
521 raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
522 raster.ScissorRectangleEnable = true;
523
524 #if GEN_GEN >= 9
525 /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
526 raster.ViewportZFarClipTestEnable = !pipeline->depth_clamp_enable;
527 raster.ViewportZNearClipTestEnable = !pipeline->depth_clamp_enable;
528 #elif GEN_GEN >= 8
529 raster.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
530 #endif
531
532 raster.GlobalDepthOffsetEnableSolid = rs_info->depthBiasEnable;
533 raster.GlobalDepthOffsetEnableWireframe = rs_info->depthBiasEnable;
534 raster.GlobalDepthOffsetEnablePoint = rs_info->depthBiasEnable;
535
536 #if GEN_GEN == 7
537 /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it
538 * can get the depth offsets correct.
539 */
540 if (subpass->depth_stencil_attachment < pass->attachment_count) {
541 VkFormat vk_format =
542 pass->attachments[subpass->depth_stencil_attachment].format;
543 assert(vk_format_is_depth_or_stencil(vk_format));
544 if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
545 enum isl_format isl_format =
546 anv_get_isl_format(&pipeline->device->info, vk_format,
547 VK_IMAGE_ASPECT_DEPTH_BIT,
548 VK_IMAGE_TILING_OPTIMAL);
549 sf.DepthBufferSurfaceFormat =
550 isl_format_get_depth_format(isl_format, false);
551 }
552 }
553 #endif
554
555 #if GEN_GEN >= 8
556 GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);
557 GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
558 #else
559 # undef raster
560 GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf);
561 #endif
562 }
563
564 static void
565 emit_ms_state(struct anv_pipeline *pipeline,
566 const VkPipelineMultisampleStateCreateInfo *info)
567 {
568 uint32_t samples = 1;
569 uint32_t log2_samples = 0;
570
571 /* From the Vulkan 1.0 spec:
572 * If pSampleMask is NULL, it is treated as if the mask has all bits
573 * enabled, i.e. no coverage is removed from fragments.
574 *
575 * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
576 */
577 #if GEN_GEN >= 8
578 uint32_t sample_mask = 0xffff;
579 #else
580 uint32_t sample_mask = 0xff;
581 #endif
582
583 if (info) {
584 samples = info->rasterizationSamples;
585 log2_samples = __builtin_ffs(samples) - 1;
586 }
587
588 if (info && info->pSampleMask)
589 sample_mask &= info->pSampleMask[0];
590
591 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
592 ms.NumberofMultisamples = log2_samples;
593
594 #if GEN_GEN >= 8
595 /* The PRM says that this bit is valid only for DX9:
596 *
597 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
598 * should not have any effect by setting or not setting this bit.
599 */
600 ms.PixelPositionOffsetEnable = false;
601 ms.PixelLocation = CENTER;
602 #else
603 ms.PixelLocation = PIXLOC_CENTER;
604
605 switch (samples) {
606 case 1:
607 GEN_SAMPLE_POS_1X(ms.Sample);
608 break;
609 case 2:
610 GEN_SAMPLE_POS_2X(ms.Sample);
611 break;
612 case 4:
613 GEN_SAMPLE_POS_4X(ms.Sample);
614 break;
615 case 8:
616 GEN_SAMPLE_POS_8X(ms.Sample);
617 break;
618 default:
619 break;
620 }
621 #endif
622 }
623
624 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
625 sm.SampleMask = sample_mask;
626 }
627 }
628
629 static const uint32_t vk_to_gen_logic_op[] = {
630 [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
631 [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
632 [VK_LOGIC_OP_AND] = LOGICOP_AND,
633 [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
634 [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
635 [VK_LOGIC_OP_NO_OP] = LOGICOP_NOOP,
636 [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
637 [VK_LOGIC_OP_OR] = LOGICOP_OR,
638 [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
639 [VK_LOGIC_OP_EQUIVALENT] = LOGICOP_EQUIV,
640 [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
641 [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
642 [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
643 [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
644 [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
645 [VK_LOGIC_OP_SET] = LOGICOP_SET,
646 };
647
648 static const uint32_t vk_to_gen_blend[] = {
649 [VK_BLEND_FACTOR_ZERO] = BLENDFACTOR_ZERO,
650 [VK_BLEND_FACTOR_ONE] = BLENDFACTOR_ONE,
651 [VK_BLEND_FACTOR_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
652 [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
653 [VK_BLEND_FACTOR_DST_COLOR] = BLENDFACTOR_DST_COLOR,
654 [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
655 [VK_BLEND_FACTOR_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
656 [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
657 [VK_BLEND_FACTOR_DST_ALPHA] = BLENDFACTOR_DST_ALPHA,
658 [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
659 [VK_BLEND_FACTOR_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
660 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR,
661 [VK_BLEND_FACTOR_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
662 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA,
663 [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
664 [VK_BLEND_FACTOR_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
665 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
666 [VK_BLEND_FACTOR_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
667 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
668 };
669
670 static const uint32_t vk_to_gen_blend_op[] = {
671 [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
672 [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
673 [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
674 [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
675 [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
676 };
677
678 static const uint32_t vk_to_gen_compare_op[] = {
679 [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
680 [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
681 [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
682 [VK_COMPARE_OP_LESS_OR_EQUAL] = PREFILTEROPLEQUAL,
683 [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
684 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
685 [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROPGEQUAL,
686 [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
687 };
688
689 static const uint32_t vk_to_gen_stencil_op[] = {
690 [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP,
691 [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO,
692 [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE,
693 [VK_STENCIL_OP_INCREMENT_AND_CLAMP] = STENCILOP_INCRSAT,
694 [VK_STENCIL_OP_DECREMENT_AND_CLAMP] = STENCILOP_DECRSAT,
695 [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT,
696 [VK_STENCIL_OP_INCREMENT_AND_WRAP] = STENCILOP_INCR,
697 [VK_STENCIL_OP_DECREMENT_AND_WRAP] = STENCILOP_DECR,
698 };
699
700 static void
701 emit_ds_state(struct anv_pipeline *pipeline,
702 const VkPipelineDepthStencilStateCreateInfo *info,
703 const struct anv_render_pass *pass,
704 const struct anv_subpass *subpass)
705 {
706 #if GEN_GEN == 7
707 # define depth_stencil_dw pipeline->gen7.depth_stencil_state
708 #elif GEN_GEN == 8
709 # define depth_stencil_dw pipeline->gen8.wm_depth_stencil
710 #else
711 # define depth_stencil_dw pipeline->gen9.wm_depth_stencil
712 #endif
713
714 if (info == NULL) {
715 /* We're going to OR this together with the dynamic state. We need
716 * to make sure it's initialized to something useful.
717 */
718 memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
719 return;
720 }
721
722 /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */
723
724 #if GEN_GEN <= 7
725 struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
726 #else
727 struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = {
728 #endif
729 .DepthTestEnable = info->depthTestEnable,
730 .DepthBufferWriteEnable = info->depthWriteEnable,
731 .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
732 .DoubleSidedStencilEnable = true,
733
734 .StencilTestEnable = info->stencilTestEnable,
735 .StencilBufferWriteEnable = info->stencilTestEnable,
736 .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
737 .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
738 .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
739 .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
740 .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
741 .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
742 .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.depthFailOp],
743 .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
744 };
745
746 VkImageAspectFlags aspects = 0;
747 if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
748 VkFormat depth_stencil_format =
749 pass->attachments[subpass->depth_stencil_attachment].format;
750 aspects = vk_format_aspects(depth_stencil_format);
751 }
752
753 /* The Vulkan spec requires that if either depth or stencil is not present,
754 * the pipeline is to act as if the test silently passes.
755 */
756 if (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
757 depth_stencil.DepthBufferWriteEnable = false;
758 depth_stencil.DepthTestFunction = PREFILTEROPALWAYS;
759 }
760
761 if (!(aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
762 depth_stencil.StencilBufferWriteEnable = false;
763 depth_stencil.StencilTestFunction = PREFILTEROPALWAYS;
764 depth_stencil.BackfaceStencilTestFunction = PREFILTEROPALWAYS;
765 }
766
767 /* From the Broadwell PRM:
768 *
769 * "If Depth_Test_Enable = 1 AND Depth_Test_func = EQUAL, the
770 * Depth_Write_Enable must be set to 0."
771 */
772 if (info->depthTestEnable && info->depthCompareOp == VK_COMPARE_OP_EQUAL)
773 depth_stencil.DepthBufferWriteEnable = false;
774
775 #if GEN_GEN <= 7
776 GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
777 #else
778 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, depth_stencil_dw, &depth_stencil);
779 #endif
780 }
781
782 static void
783 emit_cb_state(struct anv_pipeline *pipeline,
784 const VkPipelineColorBlendStateCreateInfo *info,
785 const VkPipelineMultisampleStateCreateInfo *ms_info)
786 {
787 struct anv_device *device = pipeline->device;
788
789 const uint32_t num_dwords = GENX(BLEND_STATE_length);
790 pipeline->blend_state =
791 anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
792
793 struct GENX(BLEND_STATE) blend_state = {
794 #if GEN_GEN >= 8
795 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
796 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
797 #else
798 /* Make sure it gets zeroed */
799 .Entry = { { 0, }, },
800 #endif
801 };
802
803 /* Default everything to disabled */
804 for (uint32_t i = 0; i < 8; i++) {
805 blend_state.Entry[i].WriteDisableAlpha = true;
806 blend_state.Entry[i].WriteDisableRed = true;
807 blend_state.Entry[i].WriteDisableGreen = true;
808 blend_state.Entry[i].WriteDisableBlue = true;
809 }
810
811 uint32_t surface_count = 0;
812 struct anv_pipeline_bind_map *map;
813 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
814 map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
815 surface_count = map->surface_count;
816 }
817
818 bool has_writeable_rt = false;
819 for (unsigned i = 0; i < surface_count; i++) {
820 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
821
822 /* All color attachments are at the beginning of the binding table */
823 if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
824 break;
825
826 /* We can have at most 8 attachments */
827 assert(i < 8);
828
829 if (binding->index >= info->attachmentCount)
830 continue;
831
832 assert(binding->binding == 0);
833 const VkPipelineColorBlendAttachmentState *a =
834 &info->pAttachments[binding->index];
835
836 blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
837 #if GEN_GEN < 8
838 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
839 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
840 #endif
841 .LogicOpEnable = info->logicOpEnable,
842 .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
843 .ColorBufferBlendEnable = a->blendEnable,
844 .ColorClampRange = COLORCLAMP_RTFORMAT,
845 .PreBlendColorClampEnable = true,
846 .PostBlendColorClampEnable = true,
847 .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
848 .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
849 .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
850 .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
851 .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
852 .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
853 .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
854 .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
855 .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
856 .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
857 };
858
859 if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
860 a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
861 a->colorBlendOp != a->alphaBlendOp) {
862 #if GEN_GEN >= 8
863 blend_state.IndependentAlphaBlendEnable = true;
864 #else
865 blend_state.Entry[i].IndependentAlphaBlendEnable = true;
866 #endif
867 }
868
869 if (a->colorWriteMask != 0)
870 has_writeable_rt = true;
871
872 /* Our hardware applies the blend factor prior to the blend function
873 * regardless of what function is used. Technically, this means the
874 * hardware can do MORE than GL or Vulkan specify. However, it also
875 * means that, for MIN and MAX, we have to stomp the blend factor to
876 * ONE to make it a no-op.
877 */
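/* For example, with VK_BLEND_OP_MIN and srcColorBlendFactor ==
 * VK_BLEND_FACTOR_SRC_ALPHA the hardware would otherwise compute
 * min(As * Cs, Fd * Cd), whereas Vulkan defines MIN/MAX to ignore the
 * factors and produce min(Cs, Cd); forcing both factors to
 * BLENDFACTOR_ONE below restores the required behavior.
 */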
878 if (a->colorBlendOp == VK_BLEND_OP_MIN ||
879 a->colorBlendOp == VK_BLEND_OP_MAX) {
880 blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
881 blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
882 }
883 if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
884 a->alphaBlendOp == VK_BLEND_OP_MAX) {
885 blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
886 blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
887 }
888 }
889
890 #if GEN_GEN >= 8
891 struct GENX(BLEND_STATE_ENTRY) *bs0 = &blend_state.Entry[0];
892 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
893 blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable;
894 blend.HasWriteableRT = has_writeable_rt;
895 blend.ColorBufferBlendEnable = bs0->ColorBufferBlendEnable;
896 blend.SourceAlphaBlendFactor = bs0->SourceAlphaBlendFactor;
897 blend.DestinationAlphaBlendFactor = bs0->DestinationAlphaBlendFactor;
898 blend.SourceBlendFactor = bs0->SourceBlendFactor;
899 blend.DestinationBlendFactor = bs0->DestinationBlendFactor;
900 blend.AlphaTestEnable = false;
901 blend.IndependentAlphaBlendEnable =
902 blend_state.IndependentAlphaBlendEnable;
903 }
904 #else
905 (void)has_writeable_rt;
906 #endif
907
908 GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
909 if (!device->info.has_llc)
910 anv_state_clflush(pipeline->blend_state);
911
912 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
913 bsp.BlendStatePointer = pipeline->blend_state.offset;
914 #if GEN_GEN >= 8
915 bsp.BlendStatePointerValid = true;
916 #endif
917 }
918 }
919
920 static void
921 emit_3dstate_clip(struct anv_pipeline *pipeline,
922 const VkPipelineViewportStateCreateInfo *vp_info,
923 const VkPipelineRasterizationStateCreateInfo *rs_info)
924 {
925 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
926 (void) wm_prog_data;
927 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
928 clip.ClipEnable = true;
929 clip.EarlyCullEnable = true;
930 clip.APIMode = APIMODE_D3D;
931 clip.ViewportXYClipTestEnable = true;
932
933 clip.ClipMode = CLIPMODE_NORMAL;
934
935 clip.TriangleStripListProvokingVertexSelect = 0;
936 clip.LineStripListProvokingVertexSelect = 0;
937 clip.TriangleFanProvokingVertexSelect = 1;
938
939 clip.MinimumPointWidth = 0.125;
940 clip.MaximumPointWidth = 255.875;
941 clip.MaximumVPIndex = (vp_info ? vp_info->viewportCount : 1) - 1;
942
943 #if GEN_GEN == 7
944 clip.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
945 clip.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
946 clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
947 #else
948 clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
949 (wm_prog_data->barycentric_interp_modes & 0x38) != 0 : 0;
950 #endif
951 }
952 }
953
954 static void
955 emit_3dstate_streamout(struct anv_pipeline *pipeline,
956 const VkPipelineRasterizationStateCreateInfo *rs_info)
957 {
958 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
959 so.RenderingDisable = rs_info->rasterizerDiscardEnable;
960 }
961 }
962
963 static inline uint32_t
964 get_sampler_count(const struct anv_shader_bin *bin)
965 {
966 return DIV_ROUND_UP(bin->bind_map.sampler_count, 4);
967 }
968
969 static inline uint32_t
970 get_binding_table_entry_count(const struct anv_shader_bin *bin)
971 {
972 return DIV_ROUND_UP(bin->bind_map.surface_count, 32);
973 }
974
975 static inline struct anv_address
976 get_scratch_address(struct anv_pipeline *pipeline,
977 gl_shader_stage stage,
978 const struct anv_shader_bin *bin)
979 {
980 return (struct anv_address) {
981 .bo = anv_scratch_pool_alloc(pipeline->device,
982 &pipeline->device->scratch_pool,
983 stage, bin->prog_data->total_scratch),
984 .offset = 0,
985 };
986 }
987
988 static inline uint32_t
989 get_scratch_space(const struct anv_shader_bin *bin)
990 {
991 return ffs(bin->prog_data->total_scratch / 2048);
992 }
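/* Note: for power-of-two scratch sizes this is log2(total_scratch / 1024),
 * e.g. total_scratch == 4096 gives ffs(4096 / 2048) == ffs(2) == 2, and
 * total_scratch == 0 gives 0.
 */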
993
994 static inline uint32_t
995 get_urb_output_offset(void)
996 {
997 /* Skip the VUE header and position slots */
998 return 1;
999 }
1000
1001 static inline uint32_t
1002 get_urb_output_length(const struct anv_shader_bin *bin)
1003 {
1004 const struct brw_vue_prog_data *prog_data =
1005 (const struct brw_vue_prog_data *)bin->prog_data;
1006
1007 return (prog_data->vue_map.num_slots + 1) / 2 - get_urb_output_offset();
1008 }
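/* For example, a VUE map with 9 slots (2 header/position slots plus 7
 * varyings) gives an output length of (9 + 1) / 2 - 1 == 4 two-slot
 * groups, read starting after the header/position pair skipped by
 * get_urb_output_offset().
 */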
1009
1010 static void
1011 emit_3dstate_vs(struct anv_pipeline *pipeline)
1012 {
1013 const struct gen_device_info *devinfo = &pipeline->device->info;
1014 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1015 const struct anv_shader_bin *vs_bin =
1016 pipeline->shaders[MESA_SHADER_VERTEX];
1017
1018 assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
1019
1020 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
1021 vs.FunctionEnable = true;
1022 vs.StatisticsEnable = true;
1023 vs.KernelStartPointer = vs_bin->kernel.offset;
1024 #if GEN_GEN >= 8
1025 vs.SIMD8DispatchEnable =
1026 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
1027 #endif
1028
1029 assert(!vs_prog_data->base.base.use_alt_mode);
1030 vs.SingleVertexDispatch = false;
1031 vs.VectorMaskEnable = false;
1032 vs.SamplerCount = get_sampler_count(vs_bin);
1033 vs.BindingTableEntryCount = get_binding_table_entry_count(vs_bin);
1034 vs.FloatingPointMode = IEEE754;
1035 vs.IllegalOpcodeExceptionEnable = false;
1036 vs.SoftwareExceptionEnable = false;
1037 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
1038 vs.VertexCacheDisable = false;
1039
1040 vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
1041 vs.VertexURBEntryReadOffset = 0;
1042 vs.DispatchGRFStartRegisterForURBData =
1043 vs_prog_data->base.base.dispatch_grf_start_reg;
1044
1045 #if GEN_GEN >= 8
1046 vs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
1047 vs.VertexURBEntryOutputLength = get_urb_output_length(vs_bin);
1048
1049 /* TODO */
1050 vs.UserClipDistanceClipTestEnableBitmask = 0;
1051 vs.UserClipDistanceCullTestEnableBitmask = 0;
1052 #endif
1053
1054 vs.PerThreadScratchSpace = get_scratch_space(vs_bin);
1055 vs.ScratchSpaceBasePointer =
1056 get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
1057 }
1058 }
1059
1060 static void
1061 emit_3dstate_gs(struct anv_pipeline *pipeline)
1062 {
1063 const struct gen_device_info *devinfo = &pipeline->device->info;
1064 const struct anv_shader_bin *gs_bin =
1065 pipeline->shaders[MESA_SHADER_GEOMETRY];
1066
1067 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
1068 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
1069 return;
1070 }
1071
1072 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
1073
1074 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
1075 gs.FunctionEnable = true;
1076 gs.StatisticsEnable = true;
1077 gs.KernelStartPointer = gs_bin->kernel.offset;
1078 gs.DispatchMode = gs_prog_data->base.dispatch_mode;
1079
1080 gs.SingleProgramFlow = false;
1081 gs.VectorMaskEnable = false;
1082 gs.SamplerCount = get_sampler_count(gs_bin);
1083 gs.BindingTableEntryCount = get_binding_table_entry_count(gs_bin);
1084 gs.IncludeVertexHandles = gs_prog_data->base.include_vue_handles;
1085 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
1086
1087 if (GEN_GEN == 8) {
1088 /* Broadwell is weird. It needs us to divide by 2. */
1089 gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
1090 } else {
1091 gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
1092 }
1093
1094 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
1095 gs.OutputTopology = gs_prog_data->output_topology;
1096 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1097 gs.ControlDataFormat = gs_prog_data->control_data_format;
1098 gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
1099 gs.InstanceControl = MAX2(gs_prog_data->invocations, 1) - 1;
1100 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1101 gs.ReorderMode = TRAILING;
1102 #else
1103 gs.ReorderEnable = true;
1104 #endif
1105
1106 #if GEN_GEN >= 8
1107 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
1108 gs.StaticOutput = gs_prog_data->static_vertex_count >= 0;
1109 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ?
1110 gs_prog_data->static_vertex_count : 0;
1111 #endif
1112
1113 gs.VertexURBEntryReadOffset = 0;
1114 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1115 gs.DispatchGRFStartRegisterForURBData =
1116 gs_prog_data->base.base.dispatch_grf_start_reg;
1117
1118 #if GEN_GEN >= 8
1119 gs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
1120 gs.VertexURBEntryOutputLength = get_urb_output_length(gs_bin);
1121
1122 /* TODO */
1123 gs.UserClipDistanceClipTestEnableBitmask = 0;
1124 gs.UserClipDistanceCullTestEnableBitmask = 0;
1125 #endif
1126
1127 gs.PerThreadScratchSpace = get_scratch_space(gs_bin);
1128 gs.ScratchSpaceBasePointer =
1129 get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
1130 }
1131 }
1132
1133 static void
1134 emit_3dstate_wm(struct anv_pipeline *pipeline,
1135 const VkPipelineMultisampleStateCreateInfo *multisample)
1136 {
1137 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1138
1139 MAYBE_UNUSED uint32_t samples =
1140 multisample ? multisample->rasterizationSamples : 1;
1141
1142 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
1143 wm.StatisticsEnable = true;
1144 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1145 wm.LineAntialiasingRegionWidth = _10pixels;
1146 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1147
1148 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1149 if (wm_prog_data->early_fragment_tests) {
1150 wm.EarlyDepthStencilControl = EDSC_PREPS;
1151 } else if (wm_prog_data->has_side_effects) {
1152 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
1153 } else {
1154 wm.EarlyDepthStencilControl = EDSC_NORMAL;
1155 }
1156
1157 wm.BarycentricInterpolationMode =
1158 wm_prog_data->barycentric_interp_modes;
1159
1160 #if GEN_GEN < 8
1161 /* FIXME: This needs a lot more work, cf gen7 upload_wm_state(). */
1162 wm.ThreadDispatchEnable = true;
1163
1164 wm.PixelShaderKillsPixel = wm_prog_data->uses_kill;
1165 wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
1166 wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
1167 wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
1168 wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1169
1170 if (samples > 1) {
1171 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
1172 if (wm_prog_data->persample_dispatch) {
1173 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1174 } else {
1175 wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
1176 }
1177 } else {
1178 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
1179 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1180 }
1181 #endif
1182 }
1183 }
1184 }
1185
1186 static void
1187 emit_3dstate_ps(struct anv_pipeline *pipeline)
1188 {
1189 MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
1190 const struct anv_shader_bin *fs_bin =
1191 pipeline->shaders[MESA_SHADER_FRAGMENT];
1192
1193 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1194 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1195 #if GEN_GEN == 7
1196 /* Even if no fragments are ever dispatched, gen7 hardware hangs if
1197 * we don't at least set the maximum number of threads.
1198 */
1199 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
1200 #endif
1201 }
1202 return;
1203 }
1204
1205 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1206
1207 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1208 ps.KernelStartPointer0 = fs_bin->kernel.offset;
1209 ps.KernelStartPointer1 = 0;
1210 ps.KernelStartPointer2 = fs_bin->kernel.offset +
1211 wm_prog_data->prog_offset_2;
1212 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
1213 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
1214 ps._32PixelDispatchEnable = false;
1215
1216 ps.SingleProgramFlow = false;
1217 ps.VectorMaskEnable = true;
1218 ps.SamplerCount = get_sampler_count(fs_bin);
1219 ps.BindingTableEntryCount = get_binding_table_entry_count(fs_bin);
1220 ps.PushConstantEnable = wm_prog_data->base.nr_params > 0;
1221 ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
1222 POSOFFSET_SAMPLE : POSOFFSET_NONE;
1223 #if GEN_GEN < 8
1224 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
1225 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1226 ps.DualSourceBlendEnable = wm_prog_data->dual_src_blend;
1227 #endif
1228
1229 #if GEN_IS_HASWELL
1230 /* Haswell requires the sample mask to be set in this packet as well
1231 * as in 3DSTATE_SAMPLE_MASK; the values should match.
1232 */
1233 ps.SampleMask = 0xff;
1234 #endif
1235
1236 #if GEN_GEN >= 9
1237 ps.MaximumNumberofThreadsPerPSD = 64 - 1;
1238 #elif GEN_GEN >= 8
1239 ps.MaximumNumberofThreadsPerPSD = 64 - 2;
1240 #else
1241 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
1242 #endif
1243
1244 ps.DispatchGRFStartRegisterForConstantSetupData0 =
1245 wm_prog_data->base.dispatch_grf_start_reg;
1246 ps.DispatchGRFStartRegisterForConstantSetupData1 = 0;
1247 ps.DispatchGRFStartRegisterForConstantSetupData2 =
1248 wm_prog_data->dispatch_grf_start_reg_2;
1249
1250 ps.PerThreadScratchSpace = get_scratch_space(fs_bin);
1251 ps.ScratchSpaceBasePointer =
1252 get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
1253 }
1254 }
1255
1256 #if GEN_GEN >= 8
1257 static void
1258 emit_3dstate_ps_extra(struct anv_pipeline *pipeline)
1259 {
1260 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1261
1262 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1263 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
1264 return;
1265 }
1266
1267 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
1268 ps.PixelShaderValid = true;
1269 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
1270 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1271 ps.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
1272 ps.PixelShaderKillsPixel = wm_prog_data->uses_kill;
1273 ps.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
1274 ps.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
1275 ps.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
1276
1277 #if GEN_GEN >= 9
1278 ps.PixelShaderPullsBary = wm_prog_data->pulls_bary;
1279 ps.InputCoverageMaskState = wm_prog_data->uses_sample_mask ?
1280 ICMS_INNER_CONSERVATIVE : ICMS_NONE;
1281 #else
1282 ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1283 #endif
1284 }
1285 }
1286
1287 static void
1288 emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
1289 {
1290 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
1291 vft.PrimitiveTopologyType = pipeline->topology;
1292 }
1293 }
1294 #endif
1295
1296 static VkResult
1297 genX(graphics_pipeline_create)(
1298 VkDevice _device,
1299 struct anv_pipeline_cache * cache,
1300 const VkGraphicsPipelineCreateInfo* pCreateInfo,
1301 const VkAllocationCallbacks* pAllocator,
1302 VkPipeline* pPipeline)
1303 {
1304 ANV_FROM_HANDLE(anv_device, device, _device);
1305 ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
1306 struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
1307 struct anv_pipeline *pipeline;
1308 VkResult result;
1309
1310 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1311
1312 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1313 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1314 if (pipeline == NULL)
1315 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1316
1317 result = anv_pipeline_init(pipeline, device, cache,
1318 pCreateInfo, pAllocator);
1319 if (result != VK_SUCCESS) {
1320 vk_free2(&device->alloc, pAllocator, pipeline);
1321 return result;
1322 }
1323
1324 assert(pCreateInfo->pVertexInputState);
1325 emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
1326 assert(pCreateInfo->pRasterizationState);
1327 emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
1328 pCreateInfo->pMultisampleState, pass, subpass);
1329 emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
1330 emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
1331 emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
1332 pCreateInfo->pMultisampleState);
1333
1334 emit_urb_setup(pipeline);
1335
1336 emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
1337 pCreateInfo->pRasterizationState);
1338 emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
1339
1340 #if 0
1341 /* From gen7_vs_state.c */
1342
1343 /**
1344 * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
1345 * Geometry > Geometry Shader > State:
1346 *
1347 * "Note: Because of corruption in IVB:GT2, software needs to flush the
1348 * whole fixed function pipeline when the GS enable changes value in
1349 * the 3DSTATE_GS."
1350 *
1351 * The hardware architects have clarified that in this context "flush the
1352 * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
1353 * Stall" bit set.
1354 */
1355 if (!brw->is_haswell && !brw->is_baytrail)
1356 gen7_emit_vs_workaround_flush(brw);
1357 #endif
1358
1359 emit_3dstate_vs(pipeline);
1360 emit_3dstate_gs(pipeline);
1361 emit_3dstate_sbe(pipeline);
1362 emit_3dstate_wm(pipeline, pCreateInfo->pMultisampleState);
1363 emit_3dstate_ps(pipeline);
1364 #if GEN_GEN >= 8
1365 emit_3dstate_ps_extra(pipeline);
1366 emit_3dstate_vf_topology(pipeline);
1367 #endif
1368
1369 *pPipeline = anv_pipeline_to_handle(pipeline);
1370
1371 return VK_SUCCESS;
1372 }
1373
1374 static VkResult
1375 compute_pipeline_create(
1376 VkDevice _device,
1377 struct anv_pipeline_cache * cache,
1378 const VkComputePipelineCreateInfo* pCreateInfo,
1379 const VkAllocationCallbacks* pAllocator,
1380 VkPipeline* pPipeline)
1381 {
1382 ANV_FROM_HANDLE(anv_device, device, _device);
1383 const struct anv_physical_device *physical_device =
1384 &device->instance->physicalDevice;
1385 const struct gen_device_info *devinfo = &physical_device->info;
1386 struct anv_pipeline *pipeline;
1387 VkResult result;
1388
1389 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
1390
1391 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1392 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1393 if (pipeline == NULL)
1394 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1395
1396 pipeline->device = device;
1397 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
1398
1399 pipeline->blend_state.map = NULL;
1400
1401 result = anv_reloc_list_init(&pipeline->batch_relocs,
1402 pAllocator ? pAllocator : &device->alloc);
1403 if (result != VK_SUCCESS) {
1404 vk_free2(&device->alloc, pAllocator, pipeline);
1405 return result;
1406 }
1407 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1408 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1409 pipeline->batch.relocs = &pipeline->batch_relocs;
1410
1411 /* When we free the pipeline, we detect stages based on the NULL status
1412 * of various prog_data pointers. Make them NULL by default.
1413 */
1414 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1415
1416 pipeline->active_stages = 0;
1417
1418 pipeline->needs_data_cache = false;
1419
1420 assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
1421 ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
1422 result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
1423 pCreateInfo->stage.pName,
1424 pCreateInfo->stage.pSpecializationInfo);
1425 if (result != VK_SUCCESS) {
1426 vk_free2(&device->alloc, pAllocator, pipeline);
1427 return result;
1428 }
1429
1430 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1431
1432 anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
1433
1434 uint32_t group_size = cs_prog_data->local_size[0] *
1435 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
1436 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
1437
1438 if (remainder > 0)
1439 pipeline->cs_right_mask = ~0u >> (32 - remainder);
1440 else
1441 pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);
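/* Worked example: a local size of 12x1x1 with simd_size == 8 gives
 * group_size == 12 and remainder == 4, so cs_right_mask == 0xf (only the
 * first four channels of the final SIMD8 thread are live). A local size
 * of 8x8x1 with simd_size == 16 has remainder == 0, so the mask covers
 * all 16 channels (0xffff).
 */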
1442
1443 const uint32_t vfe_curbe_allocation =
1444 ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
1445 cs_prog_data->push.cross_thread.regs, 2);
1446
1447 const uint32_t subslices = MAX2(physical_device->subslice_total, 1);
1448
1449 const struct anv_shader_bin *cs_bin =
1450 pipeline->shaders[MESA_SHADER_COMPUTE];
1451
1452 anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
1453 #if GEN_GEN > 7
1454 vfe.StackSize = 0;
1455 #else
1456 vfe.GPGPUMode = true;
1457 #endif
1458 vfe.MaximumNumberofThreads =
1459 devinfo->max_cs_threads * subslices - 1;
1460 vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2;
1461 vfe.ResetGatewayTimer = true;
1462 #if GEN_GEN <= 8
1463 vfe.BypassGatewayControl = true;
1464 #endif
1465 vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2;
1466 vfe.CURBEAllocationSize = vfe_curbe_allocation;
1467
1468 vfe.PerThreadScratchSpace = get_scratch_space(cs_bin);
1469 vfe.ScratchSpaceBasePointer =
1470 get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
1471 }
1472
1473 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
1474 .KernelStartPointer = cs_bin->kernel.offset,
1475
1476 .SamplerCount = get_sampler_count(cs_bin),
1477 .BindingTableEntryCount = get_binding_table_entry_count(cs_bin),
1478 .BarrierEnable = cs_prog_data->uses_barrier,
1479 .SharedLocalMemorySize =
1480 encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared),
1481
1482 #if !GEN_IS_HASWELL
1483 .ConstantURBEntryReadOffset = 0,
1484 #endif
1485 .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
1486 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1487 .CrossThreadConstantDataReadLength =
1488 cs_prog_data->push.cross_thread.regs,
1489 #endif
1490
1491 .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
1492 };
1493 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL,
1494 pipeline->interface_descriptor_data,
1495 &desc);
1496
1497 *pPipeline = anv_pipeline_to_handle(pipeline);
1498
1499 return VK_SUCCESS;
1500 }
1501
1502 VkResult genX(CreateGraphicsPipelines)(
1503 VkDevice _device,
1504 VkPipelineCache pipelineCache,
1505 uint32_t count,
1506 const VkGraphicsPipelineCreateInfo* pCreateInfos,
1507 const VkAllocationCallbacks* pAllocator,
1508 VkPipeline* pPipelines)
1509 {
1510 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1511
1512 VkResult result = VK_SUCCESS;
1513
1514 unsigned i = 0;
1515 for (; i < count; i++) {
1516 result = genX(graphics_pipeline_create)(_device,
1517 pipeline_cache,
1518 &pCreateInfos[i],
1519 pAllocator, &pPipelines[i]);
1520 if (result != VK_SUCCESS) {
1521 for (unsigned j = 0; j < i; j++) {
1522 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1523 }
1524
1525 return result;
1526 }
1527 }
1528
1529 return VK_SUCCESS;
1530 }
1531
1532 VkResult genX(CreateComputePipelines)(
1533 VkDevice _device,
1534 VkPipelineCache pipelineCache,
1535 uint32_t count,
1536 const VkComputePipelineCreateInfo* pCreateInfos,
1537 const VkAllocationCallbacks* pAllocator,
1538 VkPipeline* pPipelines)
1539 {
1540 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1541
1542 VkResult result = VK_SUCCESS;
1543
1544 unsigned i = 0;
1545 for (; i < count; i++) {
1546 result = compute_pipeline_create(_device, pipeline_cache,
1547 &pCreateInfos[i],
1548 pAllocator, &pPipelines[i]);
1549 if (result != VK_SUCCESS) {
1550 for (unsigned j = 0; j < i; j++) {
1551 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1552 }
1553
1554 return result;
1555 }
1556 }
1557
1558 return VK_SUCCESS;
1559 }