/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
32 #include "genxml/gen_macros.h"
33 #include "genxml/genX_pack.h"
35 #include "genX_pipeline_util.h"
38 genX(graphics_pipeline_create
)(
40 struct anv_pipeline_cache
* cache
,
41 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
42 const struct anv_graphics_pipeline_create_info
*extra
,
43 const VkAllocationCallbacks
* pAllocator
,
44 VkPipeline
* pPipeline
)
46 ANV_FROM_HANDLE(anv_device
, device
, _device
);
47 ANV_FROM_HANDLE(anv_render_pass
, pass
, pCreateInfo
->renderPass
);
48 struct anv_subpass
*subpass
= &pass
->subpasses
[pCreateInfo
->subpass
];
49 struct anv_pipeline
*pipeline
;
52 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
);
54 pipeline
= anv_alloc2(&device
->alloc
, pAllocator
, sizeof(*pipeline
), 8,
55 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
57 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
59 result
= anv_pipeline_init(pipeline
, device
, cache
,
60 pCreateInfo
, extra
, pAllocator
);
61 if (result
!= VK_SUCCESS
) {
62 anv_free2(&device
->alloc
, pAllocator
, pipeline
);
66 assert(pCreateInfo
->pVertexInputState
);
67 emit_vertex_input(pipeline
, pCreateInfo
->pVertexInputState
, extra
);
69 assert(pCreateInfo
->pRasterizationState
);
70 emit_rs_state(pipeline
, pCreateInfo
->pRasterizationState
, extra
);
72 emit_ds_state(pipeline
, pCreateInfo
->pDepthStencilState
, pass
, subpass
);
74 emit_cb_state(pipeline
, pCreateInfo
->pColorBlendState
,
75 pCreateInfo
->pMultisampleState
);
77 emit_urb_setup(pipeline
);
79 emit_3dstate_clip(pipeline
, pCreateInfo
->pViewportState
,
80 pCreateInfo
->pRasterizationState
, extra
);
81 emit_3dstate_streamout(pipeline
, pCreateInfo
->pRasterizationState
);
83 if (pCreateInfo
->pMultisampleState
&&
84 pCreateInfo
->pMultisampleState
->rasterizationSamples
> 1)
85 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
88 uint32_t log2_samples
= __builtin_ffs(samples
) - 1;
90 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_MULTISAMPLE
), ms
) {
91 ms
.PixelLocation
= PIXLOC_CENTER
;
92 ms
.NumberofMultisamples
= log2_samples
;
95 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_SAMPLE_MASK
), sm
) {
99 const struct brw_vs_prog_data
*vs_prog_data
= get_vs_prog_data(pipeline
);
102 /* From gen7_vs_state.c */
105 * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
106 * Geometry > Geometry Shader > State:
108 * "Note: Because of corruption in IVB:GT2, software needs to flush the
109 * whole fixed function pipeline when the GS enable changes value in
112 * The hardware architects have clarified that in this context "flush the
113 * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
116 if (!brw
->is_haswell
&& !brw
->is_baytrail
)
117 gen7_emit_vs_workaround_flush(brw
);
120 if (pipeline
->vs_vec4
== NO_KERNEL
|| (extra
&& extra
->disable_vs
))
121 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_VS
), vs
);
123 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_VS
), vs
) {
124 vs
.KernelStartPointer
= pipeline
->vs_vec4
;
126 vs
.ScratchSpaceBasePointer
= (struct anv_address
) {
127 .bo
= anv_scratch_pool_alloc(device
, &device
->scratch_pool
,
129 vs_prog_data
->base
.base
.total_scratch
),
132 vs
.PerThreadScratchSpace
= scratch_space(&vs_prog_data
->base
.base
);
134 vs
.DispatchGRFStartRegisterforURBData
=
135 vs_prog_data
->base
.base
.dispatch_grf_start_reg
;
137 vs
.VertexURBEntryReadLength
= vs_prog_data
->base
.urb_read_length
;
138 vs
.VertexURBEntryReadOffset
= 0;
139 vs
.MaximumNumberofThreads
= device
->info
.max_vs_threads
- 1;
140 vs
.StatisticsEnable
= true;
141 vs
.VSFunctionEnable
= true;
144 const struct brw_gs_prog_data
*gs_prog_data
= get_gs_prog_data(pipeline
);
146 if (pipeline
->gs_kernel
== NO_KERNEL
|| (extra
&& extra
->disable_vs
)) {
147 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_GS
), gs
);
149 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_GS
), gs
) {
150 gs
.KernelStartPointer
= pipeline
->gs_kernel
;
152 gs
.ScratchSpaceBasePointer
= (struct anv_address
) {
153 .bo
= anv_scratch_pool_alloc(device
, &device
->scratch_pool
,
154 MESA_SHADER_GEOMETRY
,
155 gs_prog_data
->base
.base
.total_scratch
),
158 gs
.PerThreadScratchSpace
= scratch_space(&gs_prog_data
->base
.base
);
160 gs
.OutputVertexSize
= gs_prog_data
->output_vertex_size_hwords
* 2 - 1;
161 gs
.OutputTopology
= gs_prog_data
->output_topology
;
162 gs
.VertexURBEntryReadLength
= gs_prog_data
->base
.urb_read_length
;
163 gs
.IncludeVertexHandles
= gs_prog_data
->base
.include_vue_handles
;
165 gs
.DispatchGRFStartRegisterforURBData
=
166 gs_prog_data
->base
.base
.dispatch_grf_start_reg
;
168 gs
.MaximumNumberofThreads
= device
->info
.max_gs_threads
- 1;
169 /* This in the next dword on HSW. */
170 gs
.ControlDataFormat
= gs_prog_data
->control_data_format
;
171 gs
.ControlDataHeaderSize
= gs_prog_data
->control_data_header_size_hwords
;
172 gs
.InstanceControl
= MAX2(gs_prog_data
->invocations
, 1) - 1;
173 gs
.DispatchMode
= gs_prog_data
->base
.dispatch_mode
;
174 gs
.GSStatisticsEnable
= true;
175 gs
.IncludePrimitiveID
= gs_prog_data
->include_primitive_id
;
176 # if (GEN_IS_HASWELL)
177 gs
.ReorderMode
= REORDER_TRAILING
;
179 gs
.ReorderEnable
= true;
185 if (pipeline
->ps_ksp0
== NO_KERNEL
) {
186 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_SBE
), sbe
);
188 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_WM
), wm
) {
189 wm
.StatisticsEnable
= true;
190 wm
.ThreadDispatchEnable
= false;
191 wm
.LineEndCapAntialiasingRegionWidth
= 0; /* 0.5 pixels */
192 wm
.LineAntialiasingRegionWidth
= 1; /* 1.0 pixels */
193 wm
.EarlyDepthStencilControl
= EDSC_NORMAL
;
194 wm
.PointRasterizationRule
= RASTRULE_UPPER_RIGHT
;
197 /* Even if no fragments are ever dispatched, the hardware hangs if we
198 * don't at least set the maximum number of threads.
200 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_PS
), ps
) {
201 ps
.MaximumNumberofThreads
= device
->info
.max_wm_threads
- 1;
204 const struct brw_wm_prog_data
*wm_prog_data
= get_wm_prog_data(pipeline
);
205 if (wm_prog_data
->urb_setup
[VARYING_SLOT_BFC0
] != -1 ||
206 wm_prog_data
->urb_setup
[VARYING_SLOT_BFC1
] != -1)
207 anv_finishme("two-sided color needs sbe swizzling setup");
208 if (wm_prog_data
->urb_setup
[VARYING_SLOT_PRIMITIVE_ID
] != -1)
209 anv_finishme("primitive_id needs sbe swizzling setup");
211 emit_3dstate_sbe(pipeline
);
213 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_PS
), ps
) {
214 ps
.KernelStartPointer0
= pipeline
->ps_ksp0
;
216 ps
.ScratchSpaceBasePointer
= (struct anv_address
) {
217 .bo
= anv_scratch_pool_alloc(device
, &device
->scratch_pool
,
218 MESA_SHADER_FRAGMENT
,
219 wm_prog_data
->base
.total_scratch
),
222 ps
.PerThreadScratchSpace
= scratch_space(&wm_prog_data
->base
);
223 ps
.MaximumNumberofThreads
= device
->info
.max_wm_threads
- 1;
224 ps
.PushConstantEnable
= wm_prog_data
->base
.nr_params
> 0;
225 ps
.AttributeEnable
= wm_prog_data
->num_varying_inputs
> 0;
226 ps
.oMaskPresenttoRenderTarget
= wm_prog_data
->uses_omask
;
228 ps
.RenderTargetFastClearEnable
= false;
229 ps
.DualSourceBlendEnable
= false;
230 ps
.RenderTargetResolveEnable
= false;
232 ps
.PositionXYOffsetSelect
= wm_prog_data
->uses_pos_offset
?
233 POSOFFSET_SAMPLE
: POSOFFSET_NONE
;
235 ps
._32PixelDispatchEnable
= false;
236 ps
._16PixelDispatchEnable
= wm_prog_data
->dispatch_16
;
237 ps
._8PixelDispatchEnable
= wm_prog_data
->dispatch_8
;
239 ps
.DispatchGRFStartRegisterforConstantSetupData0
=
240 wm_prog_data
->base
.dispatch_grf_start_reg
,
241 ps
.DispatchGRFStartRegisterforConstantSetupData1
= 0,
242 ps
.DispatchGRFStartRegisterforConstantSetupData2
=
243 wm_prog_data
->dispatch_grf_start_reg_2
,
245 /* Haswell requires the sample mask to be set in this packet as well as
246 * in 3DSTATE_SAMPLE_MASK; the values should match. */
247 /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
249 ps
.KernelStartPointer1
= 0;
250 ps
.KernelStartPointer2
= pipeline
->ps_ksp0
+ wm_prog_data
->prog_offset_2
;
253 /* FIXME-GEN7: This needs a lot more work, cf gen7 upload_wm_state(). */
254 anv_batch_emit(&pipeline
->batch
, GENX(3DSTATE_WM
), wm
) {
255 wm
.StatisticsEnable
= true;
256 wm
.ThreadDispatchEnable
= true;
257 wm
.LineEndCapAntialiasingRegionWidth
= 0; /* 0.5 pixels */
258 wm
.LineAntialiasingRegionWidth
= 1; /* 1.0 pixels */
259 wm
.PointRasterizationRule
= RASTRULE_UPPER_RIGHT
;
260 wm
.PixelShaderKillPixel
= wm_prog_data
->uses_kill
;
261 wm
.PixelShaderComputedDepthMode
= wm_prog_data
->computed_depth_mode
;
262 wm
.PixelShaderUsesSourceDepth
= wm_prog_data
->uses_src_depth
;
263 wm
.PixelShaderUsesSourceW
= wm_prog_data
->uses_src_w
;
264 wm
.PixelShaderUsesInputCoverageMask
= wm_prog_data
->uses_sample_mask
;
266 if (wm_prog_data
->early_fragment_tests
) {
267 wm
.EarlyDepthStencilControl
= EDSC_PREPS
;
268 } else if (wm_prog_data
->has_side_effects
) {
269 wm
.EarlyDepthStencilControl
= EDSC_PSEXEC
;
271 wm
.EarlyDepthStencilControl
= EDSC_NORMAL
;
274 wm
.BarycentricInterpolationMode
= wm_prog_data
->barycentric_interp_modes
;
278 *pPipeline
= anv_pipeline_to_handle(pipeline
);