Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / vulkan / gen8_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "gen8_pack.h"
33 #include "gen9_pack.h"
34
35 static void
36 emit_vertex_input(struct anv_pipeline *pipeline,
37 const VkPipelineVertexInputStateCreateInfo *info)
38 {
39 const uint32_t num_dwords = 1 + info->vertexAttributeDescriptionCount * 2;
40 uint32_t *p;
41
42 static_assert(ANV_GEN >= 8, "should be compiling this for gen < 8");
43
44 if (info->vertexAttributeDescriptionCount > 0) {
45 p = anv_batch_emitn(&pipeline->batch, num_dwords,
46 GENX(3DSTATE_VERTEX_ELEMENTS));
47 }
48
49 for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
50 const VkVertexInputAttributeDescription *desc =
51 &info->pVertexAttributeDescriptions[i];
52 const struct anv_format *format = anv_format_for_vk_format(desc->format);
53
54 struct GENX(VERTEX_ELEMENT_STATE) element = {
55 .VertexBufferIndex = desc->binding,
56 .Valid = true,
57 .SourceElementFormat = format->surface_format,
58 .EdgeFlagEnable = false,
59 .SourceElementOffset = desc->offset,
60 .Component0Control = VFCOMP_STORE_SRC,
61 .Component1Control = format->num_channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
62 .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
63 .Component3Control = format->num_channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
64 };
65 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + i * 2], &element);
66
67 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING),
68 .InstancingEnable = pipeline->instancing_enable[desc->binding],
69 .VertexElementIndex = i,
70 /* Vulkan so far doesn't have an instance divisor, so
71 * this is always 1 (ignored if not instancing). */
72 .InstanceDataStepRate = 1);
73 }
74
75 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS),
76 .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
77 .VertexIDComponentNumber = 2,
78 .VertexIDElementOffset = info->vertexBindingDescriptionCount,
79 .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
80 .InstanceIDComponentNumber = 3,
81 .InstanceIDElementOffset = info->vertexBindingDescriptionCount);
82 }
83
/* Emit input-assembly state (3DSTATE_VF_TOPOLOGY).  The topology has
 * already been translated to the hardware enum and stored in
 * pipeline->topology (presumably by anv_pipeline_init() — confirm);
 * `info` and `extra` are unused here but keep the signature parallel to
 * the other emit_* helpers.
 */
static void
emit_ia_state(struct anv_pipeline *pipeline,
              const VkPipelineInputAssemblyStateCreateInfo *info,
              const struct anv_graphics_pipeline_create_info *extra)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY),
                  .PrimitiveTopologyType = pipeline->topology);
}
92
/* Translate VkPipelineRasterizationStateCreateInfo into the 3DSTATE_SF and
 * 3DSTATE_RASTER packets.  Neither packet is emitted to the batch here;
 * both are packed into pipeline->gen8.sf / pipeline->gen8.raster staging
 * dwords, presumably to be merged with dynamic state later — confirm at
 * the point of use.
 */
static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRasterizationStateCreateInfo *info,
              const struct anv_graphics_pipeline_create_info *extra)
{
   /* Direct Vulkan-enum-indexed translation tables. */
   static const uint32_t vk_to_gen_cullmode[] = {
      [VK_CULL_MODE_NONE] = CULLMODE_NONE,
      [VK_CULL_MODE_FRONT_BIT] = CULLMODE_FRONT,
      [VK_CULL_MODE_BACK_BIT] = CULLMODE_BACK,
      [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH
   };

   static const uint32_t vk_to_gen_fillmode[] = {
      [VK_POLYGON_MODE_FILL] = RASTER_SOLID,
      [VK_POLYGON_MODE_LINE] = RASTER_WIREFRAME,
      [VK_POLYGON_MODE_POINT] = RASTER_POINT,
   };

   /* Hardware FrontWinding: 1 = counter-clockwise, 0 = clockwise. */
   static const uint32_t vk_to_gen_front_face[] = {
      [VK_FRONT_FACE_COUNTER_CLOCKWISE] = 1,
      [VK_FRONT_FACE_CLOCKWISE] = 0
   };

   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .ViewportTransformEnable = !(extra && extra->disable_viewport),
      .TriangleStripListProvokingVertexSelect = 0,
      .LineStripListProvokingVertexSelect = 0,
      .TriangleFanProvokingVertexSelect = 0,
      /* Point size comes from the vertex shader only when it writes one;
       * otherwise the fixed State value below (1.0) is used. */
      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
      .PointWidth = 1.0,
   };

   /* FINISHME: VkBool32 rasterizerDiscardEnable; */

   GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);

   struct GENX(3DSTATE_RASTER) raster = {
      GENX(3DSTATE_RASTER_header),
      .FrontWinding = vk_to_gen_front_face[info->frontFace],
      .CullMode = vk_to_gen_cullmode[info->cullMode],
      /* Vulkan has a single polygonMode; apply it to both faces. */
      .FrontFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
      .BackFaceFillMode = vk_to_gen_fillmode[info->polygonMode],
      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
#if ANV_GEN == 8
      .ViewportZClipTestEnable = true,
#else
      /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
      .ViewportZFarClipTestEnable = true,
      .ViewportZNearClipTestEnable = true,
#endif
   };

   GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
}
148
/* Translate VkPipelineColorBlendStateCreateInfo (plus the alpha-to-coverage
 * bits of the multisample state) into a BLEND_STATE table allocated from the
 * device's dynamic state pool, then emit 3DSTATE_BLEND_STATE_POINTERS
 * referencing it.
 *
 * `ms_info` may be NULL; `info` is dereferenced unconditionally, so callers
 * must pass a valid pointer.
 */
static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info,
              const VkPipelineMultisampleStateCreateInfo *ms_info)
{
   struct anv_device *device = pipeline->device;

   static const uint32_t vk_to_gen_logic_op[] = {
      [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
      [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
      [VK_LOGIC_OP_AND] = LOGICOP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
      [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
      [VK_LOGIC_OP_NO_OP] = LOGICOP_NOOP,
      [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
      [VK_LOGIC_OP_OR] = LOGICOP_OR,
      [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
      [VK_LOGIC_OP_EQUIVALENT] = LOGICOP_EQUIV,
      [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
      [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
      [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
      [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
      [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
      [VK_LOGIC_OP_SET] = LOGICOP_SET,
   };

   static const uint32_t vk_to_gen_blend[] = {
      [VK_BLEND_FACTOR_ZERO] = BLENDFACTOR_ZERO,
      [VK_BLEND_FACTOR_ONE] = BLENDFACTOR_ONE,
      [VK_BLEND_FACTOR_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
      [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
      [VK_BLEND_FACTOR_DST_COLOR] = BLENDFACTOR_DST_COLOR,
      [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
      [VK_BLEND_FACTOR_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
      [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
      [VK_BLEND_FACTOR_DST_ALPHA] = BLENDFACTOR_DST_ALPHA,
      [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
      [VK_BLEND_FACTOR_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
      [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR,
      [VK_BLEND_FACTOR_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
      [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA,
      [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
      [VK_BLEND_FACTOR_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
      [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
      [VK_BLEND_FACTOR_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
      [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
   };

   static const uint32_t vk_to_gen_blend_op[] = {
      [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
      [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
      [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
      [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
      [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
   };

   uint32_t num_dwords = GENX(BLEND_STATE_length);
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GENX(BLEND_STATE) blend_state = {
      .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
      .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
   };

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[i];

      /* Independent alpha blend is a single global bit; set it if any
       * attachment blends alpha differently from color. */
      if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
          a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
          a->colorBlendOp != a->alphaBlendOp) {
         blend_state.IndependentAlphaBlendEnable = true;
      }

      blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
         .LogicOpEnable = info->logicOpEnable,
         /* Note: logicOp is translated even when logicOpEnable is false;
          * the hardware ignores the function in that case. */
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .ColorClampRange = COLORCLAMP_RTFORMAT,
         .PreBlendColorClampEnable = true,
         .PostBlendColorClampEnable = true,
         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
      };

      /* Our hardware applies the blend factor prior to the blend function
       * regardless of what function is used.  Technically, this means the
       * hardware can do MORE than GL or Vulkan specify.  However, it also
       * means that, for MIN and MAX, we have to stomp the blend factor to
       * ONE to make it a no-op.
       */
      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
          a->colorBlendOp == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
      }
      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
          a->alphaBlendOp == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
      }
   }

   GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
   /* Without LLC the GPU doesn't snoop the CPU cache, so flush explicitly. */
   if (!device->info.has_llc)
      anv_state_clflush(pipeline->blend_state);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS),
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}
269
/* VkCompareOp -> hardware compare-function translation, indexed directly by
 * the Vulkan enum.  Used below for both depth and stencil test functions. */
static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
   [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
   [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL] = PREFILTEROPLEQUAL,
   [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROPGEQUAL,
   [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
};

/* VkStencilOp -> hardware STENCILOP translation, indexed directly by the
 * Vulkan enum. */
static const uint32_t vk_to_gen_stencil_op[] = {
   [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP,
   [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO,
   [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE,
   [VK_STENCIL_OP_INCREMENT_AND_CLAMP] = STENCILOP_INCRSAT,
   [VK_STENCIL_OP_DECREMENT_AND_CLAMP] = STENCILOP_DECRSAT,
   [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT,
   [VK_STENCIL_OP_INCREMENT_AND_WRAP] = STENCILOP_INCR,
   [VK_STENCIL_OP_DECREMENT_AND_WRAP] = STENCILOP_DECR,
};
291
/* Pack 3DSTATE_WM_DEPTH_STENCIL into the pipeline's per-gen staging dwords
 * (gen8 or gen9 selected at compile time via ANV_GEN).  The packet is not
 * emitted to the batch here; the dwords are later OR'd with dynamic
 * stencil-reference state (see the NULL-info comment below).
 *
 * `info` may be NULL (no depth/stencil attachment), in which case both
 * staging arrays are zeroed so the OR with dynamic state is well-defined.
 */
static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *info)
{
   /* Pick the staging buffer matching the gen this file is compiled for. */
   uint32_t *dw = ANV_GEN == 8 ?
      pipeline->gen8.wm_depth_stencil : pipeline->gen9.wm_depth_stencil;

   if (info == NULL) {
      /* We're going to OR this together with the dynamic state.  We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->gen8.wm_depth_stencil, 0,
             sizeof(pipeline->gen8.wm_depth_stencil));
      memset(pipeline->gen9.wm_depth_stencil, 0,
             sizeof(pipeline->gen9.wm_depth_stencil));
      return;
   }

   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */

   struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      /* Always on: Vulkan specifies separate front/back stencil state. */
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info->back.depthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
   };

   GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &wm_depth_stencil);
}
331
332 VkResult
333 genX(graphics_pipeline_create)(
334 VkDevice _device,
335 const VkGraphicsPipelineCreateInfo* pCreateInfo,
336 const struct anv_graphics_pipeline_create_info *extra,
337 const VkAllocationCallbacks* pAllocator,
338 VkPipeline* pPipeline)
339 {
340 ANV_FROM_HANDLE(anv_device, device, _device);
341 struct anv_pipeline *pipeline;
342 VkResult result;
343 uint32_t offset, length;
344
345 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
346
347 pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
348 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
349 if (pipeline == NULL)
350 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
351
352 result = anv_pipeline_init(pipeline, device, pCreateInfo, extra, pAllocator);
353 if (result != VK_SUCCESS)
354 return result;
355
356 /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
357 * hard code this to num_attributes - 2. This is because the attributes
358 * include VUE header and position, which aren't counted as varying
359 * inputs. */
360 if (pipeline->vs_simd8 == NO_KERNEL) {
361 pipeline->wm_prog_data.num_varying_inputs =
362 pCreateInfo->pVertexInputState->vertexAttributeDescriptionCount - 2;
363 }
364
365 assert(pCreateInfo->pVertexInputState);
366 emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
367 assert(pCreateInfo->pInputAssemblyState);
368 emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
369 assert(pCreateInfo->pRasterizationState);
370 emit_rs_state(pipeline, pCreateInfo->pRasterizationState, extra);
371 emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
372 emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
373 pCreateInfo->pMultisampleState);
374
375 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS),
376 .StatisticsEnable = true);
377 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), .Enable = false);
378 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), .TEEnable = false);
379 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), .FunctionEnable = false);
380 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), .SOFunctionEnable = false);
381
382 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS),
383 .ConstantBufferOffset = 0,
384 .ConstantBufferSize = 4);
385 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_GS),
386 .ConstantBufferOffset = 4,
387 .ConstantBufferSize = 4);
388 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS),
389 .ConstantBufferOffset = 8,
390 .ConstantBufferSize = 4);
391
392 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM_CHROMAKEY),
393 .ChromaKeyKillEnable = false);
394 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_AA_LINE_PARAMETERS));
395
396 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP),
397 .ClipEnable = true,
398 .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
399 .MinimumPointWidth = 0.125,
400 .MaximumPointWidth = 255.875);
401
402 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM),
403 .StatisticsEnable = true,
404 .LineEndCapAntialiasingRegionWidth = _05pixels,
405 .LineAntialiasingRegionWidth = _10pixels,
406 .EarlyDepthStencilControl = NORMAL,
407 .ForceThreadDispatchEnable = NORMAL,
408 .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
409 .BarycentricInterpolationMode =
410 pipeline->wm_prog_data.barycentric_interp_modes);
411
412 uint32_t samples = 1;
413 uint32_t log2_samples = __builtin_ffs(samples) - 1;
414 bool enable_sampling = samples > 1 ? true : false;
415
416 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE),
417 .PixelPositionOffsetEnable = enable_sampling,
418 .PixelLocation = CENTER,
419 .NumberofMultisamples = log2_samples);
420
421 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK),
422 .SampleMask = 0xffff);
423
424 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS),
425 .VSURBStartingAddress = pipeline->urb.vs_start,
426 .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
427 .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);
428
429 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_GS),
430 .GSURBStartingAddress = pipeline->urb.gs_start,
431 .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
432 .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);
433
434 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_HS),
435 .HSURBStartingAddress = pipeline->urb.vs_start,
436 .HSURBEntryAllocationSize = 0,
437 .HSNumberofURBEntries = 0);
438
439 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_DS),
440 .DSURBStartingAddress = pipeline->urb.vs_start,
441 .DSURBEntryAllocationSize = 0,
442 .DSNumberofURBEntries = 0);
443
444 const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
445 offset = 1;
446 length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;
447
448 if (pipeline->gs_vec4 == NO_KERNEL)
449 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .Enable = false);
450 else
451 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS),
452 .SingleProgramFlow = false,
453 .KernelStartPointer = pipeline->gs_vec4,
454 .VectorMaskEnable = Dmask,
455 .SamplerCount = 0,
456 .BindingTableEntryCount = 0,
457 .ExpectedVertexCount = pipeline->gs_vertex_count,
458
459 .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY],
460 .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),
461
462 .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
463 .OutputTopology = gs_prog_data->output_topology,
464 .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
465 .DispatchGRFStartRegisterForURBData =
466 gs_prog_data->base.base.dispatch_grf_start_reg,
467
468 .MaximumNumberofThreads = device->info.max_gs_threads / 2 - 1,
469 .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
470 .DispatchMode = gs_prog_data->base.dispatch_mode,
471 .StatisticsEnable = true,
472 .IncludePrimitiveID = gs_prog_data->include_primitive_id,
473 .ReorderMode = TRAILING,
474 .Enable = true,
475
476 .ControlDataFormat = gs_prog_data->control_data_format,
477
478 .StaticOutput = gs_prog_data->static_vertex_count >= 0,
479 .StaticOutputVertexCount =
480 gs_prog_data->static_vertex_count >= 0 ?
481 gs_prog_data->static_vertex_count : 0,
482
483 /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
484 * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
485 * UserClipDistanceCullTestEnableBitmask(v)
486 */
487
488 .VertexURBEntryOutputReadOffset = offset,
489 .VertexURBEntryOutputLength = length);
490
491 const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
492 /* Skip the VUE header and position slots */
493 offset = 1;
494 length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;
495
496 if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
497 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
498 .FunctionEnable = false,
499 /* Even if VS is disabled, SBE still gets the amount of
500 * vertex data to read from this field. */
501 .VertexURBEntryOutputReadOffset = offset,
502 .VertexURBEntryOutputLength = length);
503 else
504 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS),
505 .KernelStartPointer = pipeline->vs_simd8,
506 .SingleVertexDispatch = Multiple,
507 .VectorMaskEnable = Dmask,
508 .SamplerCount = 0,
509 .BindingTableEntryCount =
510 vue_prog_data->base.binding_table.size_bytes / 4,
511 .ThreadDispatchPriority = Normal,
512 .FloatingPointMode = IEEE754,
513 .IllegalOpcodeExceptionEnable = false,
514 .AccessesUAV = false,
515 .SoftwareExceptionEnable = false,
516
517 .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_VERTEX],
518 .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),
519
520 .DispatchGRFStartRegisterForURBData =
521 vue_prog_data->base.dispatch_grf_start_reg,
522 .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
523 .VertexURBEntryReadOffset = 0,
524
525 .MaximumNumberofThreads = device->info.max_vs_threads - 1,
526 .StatisticsEnable = false,
527 .SIMD8DispatchEnable = true,
528 .VertexCacheDisable = false,
529 .FunctionEnable = true,
530
531 .VertexURBEntryOutputReadOffset = offset,
532 .VertexURBEntryOutputLength = length,
533 .UserClipDistanceClipTestEnableBitmask = 0,
534 .UserClipDistanceCullTestEnableBitmask = 0);
535
536 const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
537
538 /* TODO: We should clean this up. Among other things, this is mostly
539 * shared with other gens.
540 */
541 const struct brw_vue_map *fs_input_map;
542 if (pipeline->gs_vec4 == NO_KERNEL)
543 fs_input_map = &vue_prog_data->vue_map;
544 else
545 fs_input_map = &gs_prog_data->base.vue_map;
546
547 struct GENX(3DSTATE_SBE_SWIZ) swiz = {
548 GENX(3DSTATE_SBE_SWIZ_header),
549 };
550
551 int max_source_attr = 0;
552 for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
553 int input_index = wm_prog_data->urb_setup[attr];
554
555 if (input_index < 0)
556 continue;
557
558 /* We have to subtract two slots to accout for the URB entry output
559 * read offset in the VS and GS stages.
560 */
561 int source_attr = fs_input_map->varying_to_slot[attr] - 2;
562 max_source_attr = MAX2(max_source_attr, source_attr);
563
564 if (input_index >= 16)
565 continue;
566
567 swiz.Attribute[input_index].SourceAttribute = source_attr;
568 }
569
570 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE),
571 .AttributeSwizzleEnable = true,
572 .ForceVertexURBEntryReadLength = false,
573 .ForceVertexURBEntryReadOffset = false,
574 .VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2),
575 .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
576 .NumberofSFOutputAttributes =
577 wm_prog_data->num_varying_inputs,
578
579 #if ANV_GEN >= 9
580 .Attribute0ActiveComponentFormat = ACF_XYZW,
581 .Attribute1ActiveComponentFormat = ACF_XYZW,
582 .Attribute2ActiveComponentFormat = ACF_XYZW,
583 .Attribute3ActiveComponentFormat = ACF_XYZW,
584 .Attribute4ActiveComponentFormat = ACF_XYZW,
585 .Attribute5ActiveComponentFormat = ACF_XYZW,
586 .Attribute6ActiveComponentFormat = ACF_XYZW,
587 .Attribute7ActiveComponentFormat = ACF_XYZW,
588 .Attribute8ActiveComponentFormat = ACF_XYZW,
589 .Attribute9ActiveComponentFormat = ACF_XYZW,
590 .Attribute10ActiveComponentFormat = ACF_XYZW,
591 .Attribute11ActiveComponentFormat = ACF_XYZW,
592 .Attribute12ActiveComponentFormat = ACF_XYZW,
593 .Attribute13ActiveComponentFormat = ACF_XYZW,
594 .Attribute14ActiveComponentFormat = ACF_XYZW,
595 .Attribute15ActiveComponentFormat = ACF_XYZW,
596 /* wow, much field, very attribute */
597 .Attribute16ActiveComponentFormat = ACF_XYZW,
598 .Attribute17ActiveComponentFormat = ACF_XYZW,
599 .Attribute18ActiveComponentFormat = ACF_XYZW,
600 .Attribute19ActiveComponentFormat = ACF_XYZW,
601 .Attribute20ActiveComponentFormat = ACF_XYZW,
602 .Attribute21ActiveComponentFormat = ACF_XYZW,
603 .Attribute22ActiveComponentFormat = ACF_XYZW,
604 .Attribute23ActiveComponentFormat = ACF_XYZW,
605 .Attribute24ActiveComponentFormat = ACF_XYZW,
606 .Attribute25ActiveComponentFormat = ACF_XYZW,
607 .Attribute26ActiveComponentFormat = ACF_XYZW,
608 .Attribute27ActiveComponentFormat = ACF_XYZW,
609 .Attribute28ActiveComponentFormat = ACF_XYZW,
610 .Attribute29ActiveComponentFormat = ACF_XYZW,
611 .Attribute28ActiveComponentFormat = ACF_XYZW,
612 .Attribute29ActiveComponentFormat = ACF_XYZW,
613 .Attribute30ActiveComponentFormat = ACF_XYZW,
614 #endif
615 );
616
617 uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
618 GENX(3DSTATE_SBE_SWIZ_length));
619 GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
620
621 const int num_thread_bias = ANV_GEN == 8 ? 2 : 1;
622 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS),
623 .KernelStartPointer0 = pipeline->ps_ksp0,
624
625 .SingleProgramFlow = false,
626 .VectorMaskEnable = true,
627 .SamplerCount = 1,
628
629 .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT],
630 .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),
631
632 .MaximumNumberofThreadsPerPSD = 64 - num_thread_bias,
633 .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
634 POSOFFSET_SAMPLE: POSOFFSET_NONE,
635 .PushConstantEnable = wm_prog_data->base.nr_params > 0,
636 ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
637 ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
638 ._32PixelDispatchEnable = false,
639
640 .DispatchGRFStartRegisterForConstantSetupData0 = pipeline->ps_grf_start0,
641 .DispatchGRFStartRegisterForConstantSetupData1 = 0,
642 .DispatchGRFStartRegisterForConstantSetupData2 = pipeline->ps_grf_start2,
643
644 .KernelStartPointer1 = 0,
645 .KernelStartPointer2 = pipeline->ps_ksp2);
646
647 bool per_sample_ps = false;
648 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA),
649 .PixelShaderValid = true,
650 .PixelShaderKillsPixel = wm_prog_data->uses_kill,
651 .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
652 .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
653 .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
654 .PixelShaderIsPerSample = per_sample_ps,
655 #if ANV_GEN >= 9
656 .PixelShaderPullsBary = wm_prog_data->pulls_bary,
657 .InputCoverageMaskState = ICMS_NONE
658 #endif
659 );
660
661 *pPipeline = anv_pipeline_to_handle(pipeline);
662
663 return VK_SUCCESS;
664 }
665
666 VkResult genX(compute_pipeline_create)(
667 VkDevice _device,
668 const VkComputePipelineCreateInfo* pCreateInfo,
669 const VkAllocationCallbacks* pAllocator,
670 VkPipeline* pPipeline)
671 {
672 ANV_FROM_HANDLE(anv_device, device, _device);
673 struct anv_pipeline *pipeline;
674 VkResult result;
675
676 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
677
678 pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
679 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
680 if (pipeline == NULL)
681 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
682
683 pipeline->device = device;
684 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
685
686 pipeline->blend_state.map = NULL;
687
688 result = anv_reloc_list_init(&pipeline->batch_relocs,
689 pAllocator ? pAllocator : &device->alloc);
690 if (result != VK_SUCCESS) {
691 anv_free2(&device->alloc, pAllocator, pipeline);
692 return result;
693 }
694 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
695 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
696 pipeline->batch.relocs = &pipeline->batch_relocs;
697
698 anv_state_stream_init(&pipeline->program_stream,
699 &device->instruction_block_pool);
700
701 /* When we free the pipeline, we detect stages based on the NULL status
702 * of various prog_data pointers. Make them NULL by default.
703 */
704 memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
705 memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
706
707 pipeline->vs_simd8 = NO_KERNEL;
708 pipeline->vs_vec4 = NO_KERNEL;
709 pipeline->gs_vec4 = NO_KERNEL;
710
711 pipeline->active_stages = 0;
712 pipeline->total_scratch = 0;
713
714 assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
715 ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
716 anv_pipeline_compile_cs(pipeline, pCreateInfo, module,
717 pCreateInfo->stage.pName);
718
719 pipeline->use_repclear = false;
720
721 const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
722
723 anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE),
724 .ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_COMPUTE],
725 .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
726 .ScratchSpaceBasePointerHigh = 0,
727 .StackSize = 0,
728
729 .MaximumNumberofThreads = device->info.max_cs_threads - 1,
730 .NumberofURBEntries = 2,
731 .ResetGatewayTimer = true,
732 #if ANV_GEN == 8
733 .BypassGatewayControl = true,
734 #endif
735 .URBEntryAllocationSize = 2,
736 .CURBEAllocationSize = 0);
737
738 struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
739 uint32_t group_size = prog_data->local_size[0] *
740 prog_data->local_size[1] * prog_data->local_size[2];
741 pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
742 uint32_t remainder = group_size & (prog_data->simd_size - 1);
743
744 if (remainder > 0)
745 pipeline->cs_right_mask = ~0u >> (32 - remainder);
746 else
747 pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);
748
749
750 *pPipeline = anv_pipeline_to_handle(pipeline);
751
752 return VK_SUCCESS;
753 }