/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
32 #include "gen8_pack.h"
35 emit_vertex_input(struct anv_pipeline
*pipeline
,
36 const VkPipelineVertexInputStateCreateInfo
*info
)
38 const uint32_t num_dwords
= 1 + info
->attributeCount
* 2;
41 if (info
->attributeCount
> 0) {
42 p
= anv_batch_emitn(&pipeline
->batch
, num_dwords
,
43 GEN8_3DSTATE_VERTEX_ELEMENTS
);
46 for (uint32_t i
= 0; i
< info
->attributeCount
; i
++) {
47 const VkVertexInputAttributeDescription
*desc
=
48 &info
->pVertexAttributeDescriptions
[i
];
49 const struct anv_format
*format
= anv_format_for_vk_format(desc
->format
);
51 struct GEN8_VERTEX_ELEMENT_STATE element
= {
52 .VertexBufferIndex
= desc
->binding
,
54 .SourceElementFormat
= format
->surface_format
,
55 .EdgeFlagEnable
= false,
56 .SourceElementOffset
= desc
->offsetInBytes
,
57 .Component0Control
= VFCOMP_STORE_SRC
,
58 .Component1Control
= format
->num_channels
>= 2 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
59 .Component2Control
= format
->num_channels
>= 3 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
60 .Component3Control
= format
->num_channels
>= 4 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_1_FP
62 GEN8_VERTEX_ELEMENT_STATE_pack(NULL
, &p
[1 + i
* 2], &element
);
64 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_INSTANCING
,
65 .InstancingEnable
= pipeline
->instancing_enable
[desc
->binding
],
66 .VertexElementIndex
= i
,
67 /* Vulkan so far doesn't have an instance divisor, so
68 * this is always 1 (ignored if not instancing). */
69 .InstanceDataStepRate
= 1);
72 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_SGVS
,
73 .VertexIDEnable
= pipeline
->vs_prog_data
.uses_vertexid
,
74 .VertexIDComponentNumber
= 2,
75 .VertexIDElementOffset
= info
->bindingCount
,
76 .InstanceIDEnable
= pipeline
->vs_prog_data
.uses_instanceid
,
77 .InstanceIDComponentNumber
= 3,
78 .InstanceIDElementOffset
= info
->bindingCount
);
82 emit_ia_state(struct anv_pipeline
*pipeline
,
83 const VkPipelineInputAssemblyStateCreateInfo
*info
,
84 const struct anv_graphics_pipeline_create_info
*extra
)
86 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_TOPOLOGY
,
87 .PrimitiveTopologyType
= pipeline
->topology
);
91 emit_rs_state(struct anv_pipeline
*pipeline
,
92 const VkPipelineRasterStateCreateInfo
*info
,
93 const struct anv_graphics_pipeline_create_info
*extra
)
95 static const uint32_t vk_to_gen_cullmode
[] = {
96 [VK_CULL_MODE_NONE
] = CULLMODE_NONE
,
97 [VK_CULL_MODE_FRONT
] = CULLMODE_FRONT
,
98 [VK_CULL_MODE_BACK
] = CULLMODE_BACK
,
99 [VK_CULL_MODE_FRONT_AND_BACK
] = CULLMODE_BOTH
102 static const uint32_t vk_to_gen_fillmode
[] = {
103 [VK_FILL_MODE_POINTS
] = RASTER_POINT
,
104 [VK_FILL_MODE_WIREFRAME
] = RASTER_WIREFRAME
,
105 [VK_FILL_MODE_SOLID
] = RASTER_SOLID
108 static const uint32_t vk_to_gen_front_face
[] = {
109 [VK_FRONT_FACE_CCW
] = CounterClockwise
,
110 [VK_FRONT_FACE_CW
] = Clockwise
113 struct GEN8_3DSTATE_SF sf
= {
114 GEN8_3DSTATE_SF_header
,
115 .ViewportTransformEnable
= !(extra
&& extra
->disable_viewport
),
116 .TriangleStripListProvokingVertexSelect
= 0,
117 .LineStripListProvokingVertexSelect
= 0,
118 .TriangleFanProvokingVertexSelect
= 0,
119 .PointWidthSource
= pipeline
->writes_point_size
? Vertex
: State
,
123 /* FINISHME: VkBool32 rasterizerDiscardEnable; */
125 GEN8_3DSTATE_SF_pack(NULL
, pipeline
->gen8
.sf
, &sf
);
127 struct GEN8_3DSTATE_RASTER raster
= {
128 GEN8_3DSTATE_RASTER_header
,
129 .FrontWinding
= vk_to_gen_front_face
[info
->frontFace
],
130 .CullMode
= vk_to_gen_cullmode
[info
->cullMode
],
131 .FrontFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
132 .BackFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
133 .ScissorRectangleEnable
= !(extra
&& extra
->disable_scissor
),
134 .ViewportZClipTestEnable
= info
->depthClipEnable
137 GEN8_3DSTATE_RASTER_pack(NULL
, pipeline
->gen8
.raster
, &raster
);
/* Build the gen8 BLEND_STATE table from the Vulkan color-blend state,
 * allocate it from the dynamic state pool, and emit
 * 3DSTATE_BLEND_STATE_POINTERS referencing it.
 */
static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info)
{
   struct anv_device *device = pipeline->device;

   /* Vulkan-enum-indexed translation tables to the HW encodings. */
   static const uint32_t vk_to_gen_logic_op[] = {
      [VK_LOGIC_OP_COPY] = LOGICOP_COPY,
      [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR,
      [VK_LOGIC_OP_AND] = LOGICOP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE,
      [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED,
      [VK_LOGIC_OP_NOOP] = LOGICOP_NOOP,
      [VK_LOGIC_OP_XOR] = LOGICOP_XOR,
      [VK_LOGIC_OP_OR] = LOGICOP_OR,
      [VK_LOGIC_OP_NOR] = LOGICOP_NOR,
      [VK_LOGIC_OP_EQUIV] = LOGICOP_EQUIV,
      [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT,
      [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE,
      [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED,
      [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED,
      [VK_LOGIC_OP_NAND] = LOGICOP_NAND,
      [VK_LOGIC_OP_SET] = LOGICOP_SET,
   };

   static const uint32_t vk_to_gen_blend[] = {
      [VK_BLEND_ZERO] = BLENDFACTOR_ZERO,
      [VK_BLEND_ONE] = BLENDFACTOR_ONE,
      [VK_BLEND_SRC_COLOR] = BLENDFACTOR_SRC_COLOR,
      [VK_BLEND_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR,
      [VK_BLEND_DEST_COLOR] = BLENDFACTOR_DST_COLOR,
      [VK_BLEND_ONE_MINUS_DEST_COLOR] = BLENDFACTOR_INV_DST_COLOR,
      [VK_BLEND_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA,
      [VK_BLEND_DEST_ALPHA] = BLENDFACTOR_DST_ALPHA,
      [VK_BLEND_ONE_MINUS_DEST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA,
      [VK_BLEND_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR,
      [VK_BLEND_ONE_MINUS_CONSTANT_COLOR] = BLENDFACTOR_INV_CONST_COLOR,
      [VK_BLEND_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA,
      [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA] = BLENDFACTOR_INV_CONST_ALPHA,
      [VK_BLEND_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE,
      [VK_BLEND_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR,
      [VK_BLEND_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR,
      [VK_BLEND_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA,
      [VK_BLEND_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA,
   };

   static const uint32_t vk_to_gen_blend_op[] = {
      [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD,
      [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT,
      [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT,
      [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN,
      [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX,
   };

   /* BLEND_STATE is a fixed-size structure; allocate it 64-byte aligned
    * from the dynamic state pool.
    */
   uint32_t num_dwords = GEN8_BLEND_STATE_length;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   struct GEN8_BLEND_STATE blend_state = {
      .AlphaToCoverageEnable = info->alphaToCoverageEnable,
      .AlphaToOneEnable = info->alphaToOneEnable,
   };

   for (uint32_t i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[i];

      /* The global IndependentAlphaBlendEnable bit is needed as soon as any
       * attachment blends alpha differently from color.
       */
      if (a->srcBlendColor != a->srcBlendAlpha ||
          a->destBlendColor != a->destBlendAlpha ||
          a->blendOpColor != a->blendOpAlpha) {
         blend_state.IndependentAlphaBlendEnable = true;
      }

      blend_state.Entry[i] = (struct GEN8_BLEND_STATE_ENTRY) {
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .PreBlendSourceOnlyClampEnable = false,
         .ColorClampRange = COLORCLAMP_RTFORMAT,
         .PreBlendColorClampEnable = true,
         .PostBlendColorClampEnable = true,
         .SourceBlendFactor = vk_to_gen_blend[a->srcBlendColor],
         .DestinationBlendFactor = vk_to_gen_blend[a->destBlendColor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->blendOpColor],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcBlendAlpha],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->destBlendAlpha],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->blendOpAlpha],
         .WriteDisableAlpha = !(a->channelWriteMask & VK_CHANNEL_A_BIT),
         .WriteDisableRed = !(a->channelWriteMask & VK_CHANNEL_R_BIT),
         .WriteDisableGreen = !(a->channelWriteMask & VK_CHANNEL_G_BIT),
         .WriteDisableBlue = !(a->channelWriteMask & VK_CHANNEL_B_BIT),
      };

      /* Our hardware applies the blend factor prior to the blend function
       * regardless of what function is used.  Technically, this means the
       * hardware can do MORE than GL or Vulkan specify.  However, it also
       * means that, for MIN and MAX, we have to stomp the blend factor to
       * ONE to make it a no-op.
       */
      if (a->blendOpColor == VK_BLEND_OP_MIN ||
          a->blendOpColor == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
      }
      if (a->blendOpAlpha == VK_BLEND_OP_MIN ||
          a->blendOpAlpha == VK_BLEND_OP_MAX) {
         blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
         blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
      }
   }

   GEN8_BLEND_STATE_pack(NULL, pipeline->blend_state.map, &blend_state);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS,
                  .BlendStatePointer = pipeline->blend_state.offset,
                  .BlendStatePointerValid = true);
}
/* File-scope translation tables from Vulkan depth/stencil enums to the gen8
 * hardware encodings; used by emit_ds_state() below.
 */
static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER] = COMPAREFUNCTION_NEVER,
   [VK_COMPARE_OP_LESS] = COMPAREFUNCTION_LESS,
   [VK_COMPARE_OP_EQUAL] = COMPAREFUNCTION_EQUAL,
   [VK_COMPARE_OP_LESS_EQUAL] = COMPAREFUNCTION_LEQUAL,
   [VK_COMPARE_OP_GREATER] = COMPAREFUNCTION_GREATER,
   [VK_COMPARE_OP_NOT_EQUAL] = COMPAREFUNCTION_NOTEQUAL,
   [VK_COMPARE_OP_GREATER_EQUAL] = COMPAREFUNCTION_GEQUAL,
   [VK_COMPARE_OP_ALWAYS] = COMPAREFUNCTION_ALWAYS,
};

static const uint32_t vk_to_gen_stencil_op[] = {
   [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP,
   [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO,
   [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE,
   [VK_STENCIL_OP_INC_CLAMP] = STENCILOP_INCRSAT,
   [VK_STENCIL_OP_DEC_CLAMP] = STENCILOP_DECRSAT,
   [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT,
   [VK_STENCIL_OP_INC_WRAP] = STENCILOP_INCR,
   [VK_STENCIL_OP_DEC_WRAP] = STENCILOP_DECR,
};
/* Pack the Vulkan depth/stencil state into the pipeline's gen8
 * 3DSTATE_WM_DEPTH_STENCIL scratch dwords.  The packed dwords are later
 * OR'd with dynamic state, so when no depth/stencil state is provided they
 * must be zeroed rather than left uninitialized.
 */
static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *info)
{
   if (info == NULL) {
      /* We're going to OR this together with the dynamic state.  We need
       * to make sure it's initialized to something useful.
       */
      memset(pipeline->gen8.wm_depth_stencil, 0,
             sizeof(pipeline->gen8.wm_depth_stencil));
      return;
   }

   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */

   struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
      .DepthTestEnable = info->depthTestEnable,
      .DepthBufferWriteEnable = info->depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info->stencilTestEnable,
      /* Front-face stencil ops... */
      .StencilFailOp = vk_to_gen_stencil_op[info->front.stencilFailOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.stencilPassOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.stencilDepthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info->front.stencilCompareOp],
      /* ...and the independent back-face set. */
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.stencilFailOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.stencilPassOp],
      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info->back.stencilDepthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp],
   };

   GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->gen8.wm_depth_stencil,
                                      &wm_depth_stencil);
}
/* Create a gen8 graphics pipeline: allocate and initialize the anv_pipeline
 * object, then record all of the static 3D state packets (vertex fetch, IA,
 * rasterizer, depth/stencil, blend, URB layout, VS/GS/PS shader stages,
 * SBE attribute swizzling, multisample) into the pipeline's batch.
 *
 * Returns VK_SUCCESS and writes the new handle to *pPipeline, or an error
 * from allocation/initialization.
 */
VkResult
gen8_graphics_pipeline_create(
    VkDevice                                  _device,
    const VkGraphicsPipelineCreateInfo*       pCreateInfo,
    const struct anv_graphics_pipeline_create_info *extra,
    VkPipeline*                               pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;
   uint32_t offset, length;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_pipeline_init(pipeline, device, pCreateInfo, extra);
   if (result != VK_SUCCESS)
      return result;

   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
    * hard code this to num_attributes - 2.  This is because the attributes
    * include VUE header and position, which aren't counted as varying
    * inputs.
    */
   if (pipeline->vs_simd8 == NO_KERNEL) {
      pipeline->wm_prog_data.num_varying_inputs =
         pCreateInfo->pVertexInputState->attributeCount - 2;
   }

   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
   assert(pCreateInfo->pInputAssemblyState);
   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
   assert(pCreateInfo->pRasterState);
   emit_rs_state(pipeline, pCreateInfo->pRasterState, extra);
   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
   emit_cb_state(pipeline, pCreateInfo->pColorBlendState);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS,
                   .StatisticsEnable = true);
   /* Tessellation and streamout are not supported yet; disable the stages. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   /* Static push-constant space partitioning: 4kB each for VS, GS, PS. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
                  .MinimumPointWidth = 0.125,
                  .MaximumPointWidth = 255.875);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
                  .StatisticsEnable = true,
                  .LineEndCapAntialiasingRegionWidth = _05pixels,
                  .LineAntialiasingRegionWidth = _10pixels,
                  .EarlyDepthStencilControl = NORMAL,
                  .ForceThreadDispatchEnable = NORMAL,
                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
                  .BarycentricInterpolationMode =
                     pipeline->wm_prog_data.barycentric_interp_modes);

   /* Multisampling is hard-wired off for now (samples == 1). */
   uint32_t samples = 1;
   uint32_t log2_samples = __builtin_ffs(samples) - 1;
   bool enable_sampling = samples > 1 ? true : false;

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
                  .PixelPositionOffsetEnable = enable_sampling,
                  .PixelLocation = CENTER,
                  .NumberofMultisamples = log2_samples);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK,
                  .SampleMask = 0xffff);

   /* URB partitioning computed by anv_pipeline_init; HS/DS get no entries. */
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
                  .VSURBStartingAddress = pipeline->urb.vs_start,
                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
                  .GSURBStartingAddress = pipeline->urb.gs_start,
                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
                  .HSURBStartingAddress = pipeline->urb.vs_start,
                  .HSURBEntryAllocationSize = 0,
                  .HSNumberofURBEntries = 0);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
                  .DSURBStartingAddress = pipeline->urb.vs_start,
                  .DSURBEntryAllocationSize = 0,
                  .DSNumberofURBEntries = 0);

   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
   offset = 1;
   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->gs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
                     .SingleProgramFlow = false,
                     .KernelStartPointer = pipeline->gs_vec4,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount = 0,
                     .ExpectedVertexCount = pipeline->gs_vertex_count,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY],
                     .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),

                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
                     .OutputTopology = gs_prog_data->output_topology,
                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
                     .DispatchGRFStartRegisterForURBData =
                        gs_prog_data->base.base.dispatch_grf_start_reg,

                     .MaximumNumberofThreads = device->info.max_gs_threads / 2 - 1,
                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
                     .DispatchMode = gs_prog_data->base.dispatch_mode,
                     .StatisticsEnable = true,
                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
                     .ReorderMode = TRAILING,
                     .Enable = true,

                     .ControlDataFormat = gs_prog_data->control_data_format,

                     .StaticOutput = gs_prog_data->static_vertex_count >= 0,
                     .StaticOutputVertexCount =
                        gs_prog_data->static_vertex_count >= 0 ?
                        gs_prog_data->static_vertex_count : 0,

                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
                      * UserClipDistanceCullTestEnableBitmask(v)
                      */

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);

   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
   /* Skip the VUE header and position slots */
   offset = 1;
   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;

   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .FunctionEnable = false,
                     /* Even if VS is disabled, SBE still gets the amount of
                      * vertex data to read from this field. */
                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length);
   else
      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
                     .KernelStartPointer = pipeline->vs_simd8,
                     .SingleVertexDispatch = Multiple,
                     .VectorMaskEnable = Dmask,
                     .SamplerCount = 0,
                     .BindingTableEntryCount =
                        vue_prog_data->base.binding_table.size_bytes / 4,
                     .ThreadDispatchPriority = Normal,
                     .FloatingPointMode = IEEE754,
                     .IllegalOpcodeExceptionEnable = false,
                     .AccessesUAV = false,
                     .SoftwareExceptionEnable = false,

                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX],
                     .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),

                     .DispatchGRFStartRegisterForURBData =
                        vue_prog_data->base.dispatch_grf_start_reg,
                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
                     .VertexURBEntryReadOffset = 0,

                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
                     .StatisticsEnable = false,
                     .SIMD8DispatchEnable = true,
                     .VertexCacheDisable = false,
                     .FunctionEnable = true,

                     .VertexURBEntryOutputReadOffset = offset,
                     .VertexURBEntryOutputLength = length,
                     .UserClipDistanceClipTestEnableBitmask = 0,
                     .UserClipDistanceCullTestEnableBitmask = 0);

   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;

   /* TODO: We should clean this up.  Among other things, this is mostly
    * shared with other gens.
    */
   const struct brw_vue_map *fs_input_map;
   if (pipeline->gs_vec4 == NO_KERNEL)
      fs_input_map = &vue_prog_data->vue_map;
   else
      fs_input_map = &gs_prog_data->base.vue_map;

   struct GEN8_3DSTATE_SBE_SWIZ swiz = {
      GEN8_3DSTATE_SBE_SWIZ_header,
   };

   int max_source_attr = 0;
   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      /* Varyings not consumed by the FS have no setup slot. */
      if (input_index < 0)
         continue;

      /* We have to subtract two slots to accout for the URB entry output
       * read offset in the VS and GS stages.
       */
      int source_attr = fs_input_map->varying_to_slot[attr] - 2;
      max_source_attr = MAX2(max_source_attr, source_attr);

      /* Only the first 16 attributes have swizzle entries. */
      if (input_index >= 16)
         continue;

      swiz.Attribute[input_index].SourceAttribute = source_attr;
   }

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
                  .AttributeSwizzleEnable = true,
                  .ForceVertexURBEntryReadLength = false,
                  .ForceVertexURBEntryReadOffset = false,
                  .VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2),
                  .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
                  .NumberofSFOutputAttributes =
                     wm_prog_data->num_varying_inputs);

   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
                                        GEN8_3DSTATE_SBE_SWIZ_length);
   GEN8_3DSTATE_SBE_SWIZ_pack(&pipeline->batch, dw, &swiz);

   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
                  .KernelStartPointer0 = pipeline->ps_ksp0,

                  .SingleProgramFlow = false,
                  .VectorMaskEnable = true,
                  .SamplerCount = 1,

                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
                  .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),

                  .MaximumNumberofThreadsPerPSD = 64 - 2,
                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
                     POSOFFSET_SAMPLE : POSOFFSET_NONE,
                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
                  ._32PixelDispatchEnable = false,

                  .DispatchGRFStartRegisterForConstantSetupData0 = pipeline->ps_grf_start0,
                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
                  .DispatchGRFStartRegisterForConstantSetupData2 = pipeline->ps_grf_start2,

                  .KernelStartPointer1 = 0,
                  .KernelStartPointer2 = pipeline->ps_ksp2);

   bool per_sample_ps = false;
   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
                  .PixelShaderValid = true,
                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
                  .PixelShaderIsPerSample = per_sample_ps);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}
/* Create a gen8 compute pipeline: allocate the anv_pipeline object, set up
 * its batch and program stream by hand (no anv_pipeline_init for compute),
 * compile the compute shader, emit MEDIA_VFE_STATE, and derive the thread
 * dispatch width and right-execution mask from the workgroup size.
 *
 * Returns VK_SUCCESS and writes the new handle to *pPipeline, or an error
 * from allocation/relocation-list setup.
 */
VkResult gen8_compute_pipeline_create(
    VkDevice                                  _device,
    const VkComputePipelineCreateInfo*        pCreateInfo,
    VkPipeline*                               pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   /* Compute pipelines never allocate blend state. */
   pipeline->blend_state.map = NULL;

   result = anv_reloc_list_init(&pipeline->batch_relocs, device);
   if (result != VK_SUCCESS) {
      anv_device_free(device, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));

   pipeline->vs_simd8 = NO_KERNEL;
   pipeline->vs_vec4 = NO_KERNEL;
   pipeline->gs_vec4 = NO_KERNEL;

   pipeline->active_stages = 0;
   pipeline->total_scratch = 0;

   assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE);
   ANV_FROM_HANDLE(anv_shader, shader, pCreateInfo->stage.shader);
   anv_pipeline_compile_cs(pipeline, pCreateInfo, shader);

   pipeline->use_repclear = false;

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;

   anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE,
                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_COMPUTE],
                  .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
                  .ScratchSpaceBasePointerHigh = 0,
                  .StackSize = 0,

                  .MaximumNumberofThreads = device->info.max_cs_threads - 1,
                  .NumberofURBEntries = 2,
                  .ResetGatewayTimer = true,
                  .BypassGatewayControl = true,
                  .URBEntryAllocationSize = 2,
                  .CURBEAllocationSize = 0);

   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
   /* Total invocations per workgroup determines how many SIMD-width
    * threads we dispatch per group.
    */
   uint32_t group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
   /* simd_size is a power of two, so this is group_size % simd_size. */
   uint32_t remainder = group_size & (prog_data->simd_size - 1);

   /* Execution mask for the last (possibly partial) thread of a group. */
   if (remainder > 0)
      pipeline->cs_right_mask = ~0u >> (32 - remainder);
   else
      pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}