anv: Switch over to the macros in genxml
src/intel/vulkan/gen8_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "genX_pipeline_util.h"

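/* Input assembly: pipeline->topology already holds the translated hardware
 * topology value, so the only thing left to program here is
 * 3DSTATE_VF_TOPOLOGY.
 */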
static void
emit_ia_state(struct anv_pipeline *pipeline,
              const VkPipelineInputAssemblyStateCreateInfo *info,
              const struct anv_graphics_pipeline_create_info *extra)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY),
                  .PrimitiveTopologyType = pipeline->topology);
}

static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRasterizationStateCreateInfo *info,
              const VkPipelineMultisampleStateCreateInfo *ms_info,
              const struct anv_graphics_pipeline_create_info *extra)
{
   uint32_t samples = 1;

   if (ms_info)
      samples = ms_info->rasterizationSamples;

   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect = 0,
      .LineStripListProvokingVertexSelect = 0,
      .TriangleFanProvokingVertexSelect = 0,
      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
      .PointWidth = 1.0,
   };

   /* FINISHME: VkBool32 rasterizerDiscardEnable; */

   GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);

   struct GENX(3DSTATE_RASTER) raster = {
      GENX(3DSTATE_RASTER_header),

      /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
       * "Multisample Modes State".
       */
      .DXMultisampleRasterizationEnable = samples > 1,
      .ForcedSampleCount = FSC_NUMRASTSAMPLES_0,
      .ForceMultisampling = false,

      .FrontWinding = vk_to_gen_front_face[info->frontFace],
      .CullMode = vk_to_gen_cullmode[info->cullMode],
      .FrontFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
      .BackFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
#if GEN_GEN == 8
      .ViewportZClipTestEnable = true,
#else
      /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
      .ViewportZFarClipTestEnable = true,
      .ViewportZNearClipTestEnable = true,
#endif
   };

   GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
}

static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info,
              const VkPipelineMultisampleStateCreateInfo *ms_info)
{
   struct anv_device *device = pipeline->device;

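   /* BLEND_STATE is allocated from the dynamic state pool; the pipeline
    * batch only references it via 3DSTATE_BLEND_STATE_POINTERS below.
    */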
   uint32_t num_dwords = GENX(BLEND_STATE_length);
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GENX(BLEND_STATE) blend_state = {
      .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
      .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
   };

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[i];

      if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
          a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
          a->colorBlendOp != a->alphaBlendOp) {
         blend_state.IndependentAlphaBlendEnable = true;
      }

      blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .ColorClampRange = COLORCLAMP_RTFORMAT,
         .PreBlendColorClampEnable = true,
         .PostBlendColorClampEnable = true,
         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
      };

      /* Our hardware applies the blend factor prior to the blend function
       * regardless of what function is used. Technically, this means the
       * hardware can do MORE than GL or Vulkan specify. However, it also
       * means that, for MIN and MAX, we have to stomp the blend factor to
       * ONE to make it a no-op.
       */
      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
          a->colorBlendOp == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
      }
      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
          a->alphaBlendOp == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
      }
   }

   for (uint32_t i = info->attachmentCount; i < 8; i++) {
      blend_state.Entry[i].WriteDisableAlpha = true;
      blend_state.Entry[i].WriteDisableRed = true;
      blend_state.Entry[i].WriteDisableGreen = true;
      blend_state.Entry[i].WriteDisableBlue = true;
   }

   GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
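   /* Without LLC, dynamic state is not cache-coherent with the GPU, so flush
    * the CPU cache lines we just wrote before the GPU can consume them.
    */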
   if (!device->info.has_llc)
      anv_state_clflush(pipeline->blend_state);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS),
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}

static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *info)
{
   uint32_t *dw = GEN_GEN == 8 ?
      pipeline->gen8.wm_depth_stencil : pipeline->gen9.wm_depth_stencil;

   if (info == NULL) {
      /* We're going to OR this together with the dynamic state. We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->gen8.wm_depth_stencil, 0,
             sizeof(pipeline->gen8.wm_depth_stencil));
      memset(pipeline->gen9.wm_depth_stencil, 0,
             sizeof(pipeline->gen9.wm_depth_stencil));
      return;
   }

   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */

   struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.depthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
   };

   GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &wm_depth_stencil);
}

static void
emit_ms_state(struct anv_pipeline *pipeline,
              const VkPipelineMultisampleStateCreateInfo *info)
{
   uint32_t samples = 1;
   uint32_t log2_samples = 0;

   /* From the Vulkan 1.0 spec:
    *    If pSampleMask is NULL, it is treated as if the mask has all bits
    *    enabled, i.e. no coverage is removed from fragments.
    *
    * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
    */
   uint32_t sample_mask = 0xffff;

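   /* rasterizationSamples is a power of two, so ffs(samples) - 1 yields
    * log2(samples), which is what NumberofMultisamples expects.
    */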
   if (info) {
      samples = info->rasterizationSamples;
      log2_samples = __builtin_ffs(samples) - 1;
   }

   if (info && info->pSampleMask)
      sample_mask &= info->pSampleMask[0];

   if (info && info->sampleShadingEnable)
      anv_finishme("VkPipelineMultisampleStateCreateInfo::sampleShadingEnable");

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE),

                  /* The PRM says that this bit is valid only for DX9:
                   *
                   *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
                   *    should not have any effect by setting or not setting this bit.
                   */
                  .PixelPositionOffsetEnable = false,

                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK),
                  .SampleMask = sample_mask);
}

VkResult
genX(graphics_pipeline_create)(
    VkDevice                                    _device,
    struct anv_pipeline_cache *                 cache,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const struct anv_graphics_pipeline_create_info *extra,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_pipeline_init(pipeline, device, cache,
                              pCreateInfo, extra, pAllocator);
   if (result != VK_SUCCESS) {
      anv_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }

   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
   assert(pCreateInfo->pInputAssemblyState);
   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
   assert(pCreateInfo->pRasterizationState);
   emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
                 pCreateInfo->pMultisampleState, extra);
   emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
   emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
                 pCreateInfo->pMultisampleState);

   emit_urb_setup(pipeline);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP),
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
                  .MinimumPointWidth = 0.125,
                  .MaximumPointWidth = 255.875,
                  .MaximumVPIndex = pCreateInfo->pViewportState->viewportCount - 1);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM),
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->ps_ksp0 == NO_KERNEL ?
                     0 : pipeline->wm_prog_data.barycentric_interp_modes);

   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
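   /* VertexURBEntryOutputReadOffset/Length are in units of pairs of VUE
    * slots; an offset of 1 skips the VUE header and position slots.
    */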
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_kernel == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS),
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_kernel,
                     .VectorMaskEnable = false,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = gs_prog_data->vertices_in,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY],
                     .PerThreadScratchSpace = scratch_space(&gs_prog_data->base.base),

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .IncludeVertexHandles = gs_prog_data->base.include_vue_handles,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads / 2 - 1,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     .DispatchMode = gs_prog_data->base.dispatch_mode,
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     .StaticOutput = gs_prog_data->static_vertex_count >= 0,
                     .StaticOutputVertexCount =
                        gs_prog_data->static_vertex_count >= 0 ?
                        gs_prog_data->static_vertex_count : 0,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      *    UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      *    UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   uint32_t vs_start = pipeline->vs_simd8 != NO_KERNEL ? pipeline->vs_simd8 :
                       pipeline->vs_vec4;

   if (vs_start == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
                     .FunctionEnable = false,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field. */
                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);
   else
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
                     .KernelStartPointer = vs_start,
                     .SingleVertexDispatch = false,
                     .VectorMaskEnable = false,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = false,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_VERTEX],
                     .PerThreadScratchSpace = scratch_space(&vue_prog_data->base),

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = pipeline->vs_simd8 != NO_KERNEL,
                     .VertexCacheDisable = false,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;

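   /* MaximumNumberofThreadsPerPSD is a biased field: the programmed value is
    * the thread count minus 2 on gen8 and minus 1 on gen9+, hence the
    * per-gen bias below.
    */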
   const int num_thread_bias = GEN_GEN == 8 ? 2 : 1;
   if (pipeline->ps_ksp0 == NO_KERNEL) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS));
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA),
                     .PixelShaderValid = false);
   } else {
      /* TODO: We should clean this up. Among other things, this is mostly
       * shared with other gens.
       */
      const struct brw_vue_map *fs_input_map;
      if (pipeline->gs_kernel == NO_KERNEL)
         fs_input_map = &vue_prog_data->vue_map;
      else
         fs_input_map = &gs_prog_data->base.vue_map;

      struct GENX(3DSTATE_SBE_SWIZ) swiz = {
         GENX(3DSTATE_SBE_SWIZ_header),
      };

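      /* Build the SBE attribute swizzles: map each FS input attribute to its
       * source slot in the VUE map of the last enabled geometry stage (VS or
       * GS) feeding the rasterizer.
       */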
      int max_source_attr = 0;
      for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
         int input_index = wm_prog_data->urb_setup[attr];

         if (input_index < 0)
            continue;

         int source_attr = fs_input_map->varying_to_slot[attr];
         max_source_attr = MAX2(max_source_attr, source_attr);

         if (input_index >= 16)
            continue;

         if (source_attr == -1) {
            /* This attribute does not exist in the VUE--that means that the
             * vertex shader did not write to it. It could be that it's a
             * regular varying read by the fragment shader but not written by
             * the vertex shader or it's gl_PrimitiveID. In the first case the
             * value is undefined, in the second it needs to be
             * gl_PrimitiveID.
             */
            swiz.Attribute[input_index].ConstantSource = PRIM_ID;
            swiz.Attribute[input_index].ComponentOverrideX = true;
            swiz.Attribute[input_index].ComponentOverrideY = true;
            swiz.Attribute[input_index].ComponentOverrideZ = true;
            swiz.Attribute[input_index].ComponentOverrideW = true;
         } else {
            /* We have to subtract two slots to account for the URB entry output
             * read offset in the VS and GS stages.
             */
            swiz.Attribute[input_index].SourceAttribute = source_attr - 2;
         }
      }

      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE),
                     .AttributeSwizzleEnable = true,
                     .ForceVertexURBEntryReadLength = false,
                     .ForceVertexURBEntryReadOffset = false,
                     .VertexURBEntryReadLength =
                        DIV_ROUND_UP(max_source_attr + 1, 2),
                     .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
                     .NumberofSFOutputAttributes =
                        wm_prog_data->num_varying_inputs,

#if GEN_GEN >= 9
                     .Attribute0ActiveComponentFormat = ACF_XYZW,
                     .Attribute1ActiveComponentFormat = ACF_XYZW,
                     .Attribute2ActiveComponentFormat = ACF_XYZW,
                     .Attribute3ActiveComponentFormat = ACF_XYZW,
                     .Attribute4ActiveComponentFormat = ACF_XYZW,
                     .Attribute5ActiveComponentFormat = ACF_XYZW,
                     .Attribute6ActiveComponentFormat = ACF_XYZW,
                     .Attribute7ActiveComponentFormat = ACF_XYZW,
                     .Attribute8ActiveComponentFormat = ACF_XYZW,
                     .Attribute9ActiveComponentFormat = ACF_XYZW,
                     .Attribute10ActiveComponentFormat = ACF_XYZW,
                     .Attribute11ActiveComponentFormat = ACF_XYZW,
                     .Attribute12ActiveComponentFormat = ACF_XYZW,
                     .Attribute13ActiveComponentFormat = ACF_XYZW,
                     .Attribute14ActiveComponentFormat = ACF_XYZW,
                     .Attribute15ActiveComponentFormat = ACF_XYZW,
                     /* wow, much field, very attribute */
                     .Attribute16ActiveComponentFormat = ACF_XYZW,
                     .Attribute17ActiveComponentFormat = ACF_XYZW,
                     .Attribute18ActiveComponentFormat = ACF_XYZW,
                     .Attribute19ActiveComponentFormat = ACF_XYZW,
                     .Attribute20ActiveComponentFormat = ACF_XYZW,
                     .Attribute21ActiveComponentFormat = ACF_XYZW,
                     .Attribute22ActiveComponentFormat = ACF_XYZW,
                     .Attribute23ActiveComponentFormat = ACF_XYZW,
                     .Attribute24ActiveComponentFormat = ACF_XYZW,
                     .Attribute25ActiveComponentFormat = ACF_XYZW,
                     .Attribute26ActiveComponentFormat = ACF_XYZW,
                     .Attribute27ActiveComponentFormat = ACF_XYZW,
                     .Attribute28ActiveComponentFormat = ACF_XYZW,
                     .Attribute29ActiveComponentFormat = ACF_XYZW,
                     .Attribute30ActiveComponentFormat = ACF_XYZW,
#endif
                     );

      uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
                                           GENX(3DSTATE_SBE_SWIZ_length));
      GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);

      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS),
                     .KernelStartPointer0 = pipeline->ps_ksp0,

                     .SingleProgramFlow = false,
                     .VectorMaskEnable = true,
                     .SamplerCount = 1,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT],
                     .PerThreadScratchSpace = scratch_space(&wm_prog_data->base),

                     .MaximumNumberofThreadsPerPSD = 64 - num_thread_bias,
                     .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                        POSOFFSET_SAMPLE : POSOFFSET_NONE,
                     .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                     ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                     ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                     ._32PixelDispatchEnable = false,

                     .DispatchGRFStartRegisterForConstantSetupData0 = pipeline->ps_grf_start0,
                     .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                     .DispatchGRFStartRegisterForConstantSetupData2 = pipeline->ps_grf_start2,

                     .KernelStartPointer1 = 0,
                     .KernelStartPointer2 = pipeline->ps_ksp2);

      bool per_sample_ps = pCreateInfo->pMultisampleState &&
                           pCreateInfo->pMultisampleState->sampleShadingEnable;

      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA),
                     .PixelShaderValid = true,
                     .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                     .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                     .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                     .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                     .PixelShaderIsPerSample = per_sample_ps,
                     .PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth,
                     .PixelShaderUsesSourceW = wm_prog_data->uses_src_w,
#if GEN_GEN >= 9
                     .PixelShaderPullsBary = wm_prog_data->pulls_bary,
                     .InputCoverageMaskState = wm_prog_data->uses_sample_mask ?
                        ICMS_INNER_CONSERVATIVE : ICMS_NONE,
#else
                     .PixelShaderUsesInputCoverageMask =
                        wm_prog_data->uses_sample_mask,
#endif
                     );
   }

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}