src/intel/vulkan/gen7_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "genX_pipeline_util.h"

VkResult
genX(graphics_pipeline_create)(
    VkDevice                                    _device,
    struct anv_pipeline_cache *                 cache,
    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   const struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   const struct gen_device_info *devinfo = &physical_device->info;
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_pipeline_init(pipeline, device, cache,
                              pCreateInfo, pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }
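
   /* Emit the fixed-function pipeline state derived from pCreateInfo:
    * vertex input, rasterization, depth/stencil, color blend, URB setup,
    * clip, streamout and multisample state.
    */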
   assert(pCreateInfo->pVertexInputState);
   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);

   assert(pCreateInfo->pRasterizationState);
   emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
                 pCreateInfo->pMultisampleState, pass, subpass);

   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);

   emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
                 pCreateInfo->pMultisampleState);

   emit_urb_setup(pipeline);

   emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
                     pCreateInfo->pRasterizationState);
   emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);

   emit_ms_state(pipeline, pCreateInfo->pMultisampleState);

   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

#if 0
   /* From gen7_vs_state.c */

   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    *   "Note: Because of corruption in IVB:GT2, software needs to flush the
    *    whole fixed function pipeline when the GS enable changes value in
    *    the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (!brw->is_haswell && !brw->is_baytrail)
      gen7_emit_vs_workaround_flush(brw);
#endif
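
   /* 3DSTATE_VS: if there is no vertex shader kernel, emit the packet with
    * only its default (zero) fields, which leaves VSFunctionEnable false and
    * the stage disabled; otherwise program the kernel pointer, scratch space
    * and URB read layout from the compiled vs_prog_data.
    */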
   if (pipeline->vs_vec4 == NO_KERNEL)
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs);
   else
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
         vs.KernelStartPointer = pipeline->vs_vec4;

         vs.ScratchSpaceBasePointer = (struct anv_address) {
            .bo = anv_scratch_pool_alloc(device, &device->scratch_pool,
                                         MESA_SHADER_VERTEX,
                                         vs_prog_data->base.base.total_scratch),
            .offset = 0,
         };
         vs.PerThreadScratchSpace = scratch_space(&vs_prog_data->base.base);

         vs.DispatchGRFStartRegisterforURBData =
            vs_prog_data->base.base.dispatch_grf_start_reg;

         vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
         vs.VertexURBEntryReadOffset = 0;
         vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
         vs.StatisticsEnable = true;
         vs.VSFunctionEnable = true;
      }
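
   /* 3DSTATE_GS follows the same pattern: an all-default packet (GSEnable
    * false) disables the geometry shader stage when no GS kernel is present.
    */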
   const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);

   if (pipeline->gs_kernel == NO_KERNEL) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
   } else {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
         gs.KernelStartPointer = pipeline->gs_kernel;

         gs.ScratchSpaceBasePointer = (struct anv_address) {
            .bo = anv_scratch_pool_alloc(device, &device->scratch_pool,
                                         MESA_SHADER_GEOMETRY,
                                         gs_prog_data->base.base.total_scratch),
            .offset = 0,
         };
         gs.PerThreadScratchSpace = scratch_space(&gs_prog_data->base.base);

         gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
         gs.OutputTopology = gs_prog_data->output_topology;
         gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
         gs.IncludeVertexHandles = gs_prog_data->base.include_vue_handles;

         gs.DispatchGRFStartRegisterforURBData =
            gs_prog_data->base.base.dispatch_grf_start_reg;

         gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
         /* This is in the next dword on HSW. */
         gs.ControlDataFormat = gs_prog_data->control_data_format;
         gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
         gs.InstanceControl = MAX2(gs_prog_data->invocations, 1) - 1;
         gs.DispatchMode = gs_prog_data->base.dispatch_mode;
         gs.GSStatisticsEnable = true;
         gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
#if GEN_IS_HASWELL
         gs.ReorderMode = REORDER_TRAILING;
#else
         gs.ReorderEnable = true;
#endif
         gs.GSEnable = true;
      }
   }
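
   /* Pixel shader state. With no fragment shader we still emit SBE, WM and
    * PS packets, with WM thread dispatch left disabled.
    */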
   if (pipeline->ps_ksp0 == NO_KERNEL) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);

      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
         wm.StatisticsEnable = true;
         wm.ThreadDispatchEnable = false;
         wm.LineEndCapAntialiasingRegionWidth = 0; /* 0.5 pixels */
         wm.LineAntialiasingRegionWidth = 1; /* 1.0 pixels */
         wm.EarlyDepthStencilControl = EDSC_NORMAL;
         wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      }

      /* Even if no fragments are ever dispatched, the hardware hangs if we
       * don't at least set the maximum number of threads.
       */
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
         ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
      }
   } else {
      const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
      if (wm_prog_data->urb_setup[VARYING_SLOT_BFC0] != -1 ||
          wm_prog_data->urb_setup[VARYING_SLOT_BFC1] != -1)
         anv_finishme("two-sided color needs sbe swizzling setup");
      if (wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_ID] != -1)
         anv_finishme("primitive_id needs sbe swizzling setup");

      emit_3dstate_sbe(pipeline);

      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
         ps.KernelStartPointer0 = pipeline->ps_ksp0;
         ps.KernelStartPointer1 = 0;
         ps.KernelStartPointer2 = pipeline->ps_ksp0 + wm_prog_data->prog_offset_2;

         ps.ScratchSpaceBasePointer = (struct anv_address) {
            .bo = anv_scratch_pool_alloc(device, &device->scratch_pool,
                                         MESA_SHADER_FRAGMENT,
                                         wm_prog_data->base.total_scratch),
            .offset = 0,
         };
         ps.PerThreadScratchSpace = scratch_space(&wm_prog_data->base);
         ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
         ps.PushConstantEnable = wm_prog_data->base.nr_params > 0;
         ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
         ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;

         ps.RenderTargetFastClearEnable = false;
         ps.DualSourceBlendEnable = false;
         ps.RenderTargetResolveEnable = false;

         ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
            POSOFFSET_SAMPLE : POSOFFSET_NONE;

         ps._32PixelDispatchEnable = false;
         ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
         ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
         ps.DispatchGRFStartRegisterforConstantSetupData0 =
            wm_prog_data->base.dispatch_grf_start_reg;
         ps.DispatchGRFStartRegisterforConstantSetupData1 = 0;
         ps.DispatchGRFStartRegisterforConstantSetupData2 =
            wm_prog_data->dispatch_grf_start_reg_2;

         /* Haswell requires the sample mask to be set in this packet as well
          * as in 3DSTATE_SAMPLE_MASK; the values should match.
          */
         /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
#if GEN_IS_HASWELL
         ps.SampleMask = 0xff;
#endif
      }
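
      /* Pick the rasterization sample count for 3DSTATE_WM; default to
       * single-sampled when no multisample state is provided.
       */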
      uint32_t samples = pCreateInfo->pMultisampleState ?
         pCreateInfo->pMultisampleState->rasterizationSamples : 1;

      /* FIXME-GEN7: This needs a lot more work, cf gen7 upload_wm_state(). */
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
         wm.StatisticsEnable = true;
         wm.ThreadDispatchEnable = true;
         wm.LineEndCapAntialiasingRegionWidth = 0; /* 0.5 pixels */
         wm.LineAntialiasingRegionWidth = 1; /* 1.0 pixels */
         wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
         wm.PixelShaderKillPixel = wm_prog_data->uses_kill;
         wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
         wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
         wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
         wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;

         if (wm_prog_data->early_fragment_tests) {
            wm.EarlyDepthStencilControl = EDSC_PREPS;
         } else if (wm_prog_data->has_side_effects) {
            wm.EarlyDepthStencilControl = EDSC_PSEXEC;
         } else {
            wm.EarlyDepthStencilControl = EDSC_NORMAL;
         }

         wm.BarycentricInterpolationMode = wm_prog_data->barycentric_interp_modes;

         wm.MultisampleRasterizationMode = samples > 1 ?
            MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
         wm.MultisampleDispatchMode = ((samples == 1) ||
                                       (samples > 1 && wm_prog_data->persample_dispatch)) ?
            MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
      }
   }

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}