i965/fs: Add an allow_spilling flag to brw_compile_fs
[mesa.git] src/mesa/drivers/dri/i965/brw_blorp.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <errno.h>
#include "intel_batchbuffer.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_compiler.h"
#include "brw_nir.h"
#include "brw_state.h"

#define FILE_DEBUG_FLAG DEBUG_BLORP

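/**
 * Fill out a brw_blorp_surface_info for one slice of a miptree: record the
 * slice's dimensions, x/y offset, and MSAA layout, and choose a BRW surface
 * format for it.  Stencil and some depth formats are remapped to surface
 * formats the hardware can actually set up and render to; see the switch
 * below for the details.
 */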
void
brw_blorp_surface_info_init(struct brw_context *brw,
                            struct brw_blorp_surface_info *info,
                            struct intel_mipmap_tree *mt,
                            unsigned int level, unsigned int layer,
                            mesa_format format, bool is_render_target)
{
   /* Layer is a physical layer, so if this is a 2D multisample array texture
    * using INTEL_MSAA_LAYOUT_UMS or INTEL_MSAA_LAYOUT_CMS, then it had better
    * be a multiple of num_samples.
    */
   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      assert(mt->num_samples <= 1 || layer % mt->num_samples == 0);
   }

   intel_miptree_check_level_layer(mt, level, layer);

   info->mt = mt;
   info->level = level;
   info->layer = layer;
   info->width = minify(mt->physical_width0, level - mt->first_level);
   info->height = minify(mt->physical_height0, level - mt->first_level);

   intel_miptree_get_image_offset(mt, level, layer,
                                  &info->x_offset, &info->y_offset);

   info->num_samples = mt->num_samples;
   info->array_layout = mt->array_layout;
   info->map_stencil_as_y_tiled = false;
   info->msaa_layout = mt->msaa_layout;
   info->swizzle = SWIZZLE_XYZW;

   if (format == MESA_FORMAT_NONE)
      format = mt->format;

   switch (format) {
   case MESA_FORMAT_S_UINT8:
      /* The miptree is a W-tiled stencil buffer. Surface states can't be set
       * up for W tiling, so we'll need to use Y tiling and have the WM
       * program swizzle the coordinates.
       */
      info->map_stencil_as_y_tiled = true;
      info->brw_surfaceformat = brw->gen >= 8 ? BRW_SURFACEFORMAT_R8_UINT :
                                                BRW_SURFACEFORMAT_R8_UNORM;
      break;
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      /* It would make sense to use BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS
       * here, but unfortunately it isn't supported as a render target, which
       * would prevent us from blitting to 24-bit depth.
       *
       * The miptree consists of 32 bits per pixel, arranged as 24-bit depth
       * values interleaved with 8 "don't care" bits. Since depth values don't
       * require any blending, it doesn't matter how we interpret the bit
       * pattern as long as we copy the right amount of data, so just map it
       * as 8-bit BGRA.
       */
      info->brw_surfaceformat = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   case MESA_FORMAT_Z_FLOAT32:
      info->brw_surfaceformat = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case MESA_FORMAT_Z_UNORM16:
      info->brw_surfaceformat = BRW_SURFACEFORMAT_R16_UNORM;
      break;
   default: {
      if (is_render_target) {
         assert(brw->format_supported_as_render_target[format]);
         info->brw_surfaceformat = brw->render_target_format[format];
      } else {
         info->brw_surfaceformat = brw_format_for_mesa_format(format);
      }
      break;
   }
   }
}


/**
 * Split x_offset and y_offset into a base offset (in bytes) and a remaining
 * x/y offset (in pixels). Note: we can't do this by calling
 * intel_renderbuffer_tile_offsets(), because the offsets may have been
 * adjusted to account for Y vs. W tiling differences. So we compute it
 * directly from the adjusted offsets.
 */
uint32_t
brw_blorp_compute_tile_offsets(const struct brw_blorp_surface_info *info,
                               uint32_t *tile_x, uint32_t *tile_y)
{
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(info->mt->tiling, info->mt->tr_mode, info->mt->cpp,
                        info->map_stencil_as_y_tiled,
                        &mask_x, &mask_y);

   *tile_x = info->x_offset & mask_x;
   *tile_y = info->y_offset & mask_y;

   return intel_miptree_get_aligned_offset(info->mt, info->x_offset & ~mask_x,
                                           info->y_offset & ~mask_y,
                                           info->map_stencil_as_y_tiled);
}

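/**
 * Initialize a brw_blorp_params structure to its default state: no HiZ or
 * fast-clear operation, no varyings, and a single draw buffer and layer.
 */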
void
brw_blorp_params_init(struct brw_blorp_params *params)
{
   memset(params, 0, sizeof(*params));
   params->hiz_op = GEN6_HIZ_OP_NONE;
   params->fast_clear_op = 0;
   params->num_varyings = 0;
   params->num_draw_buffers = 1;
   params->num_layers = 1;
}

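/**
 * Initialize a WM program key for a BLORP shader: a single color region and
 * identity (XYZW) texture swizzles for every sampler.
 */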
void
brw_blorp_init_wm_prog_key(struct brw_wm_prog_key *wm_key)
{
   memset(wm_key, 0, sizeof(*wm_key));
   wm_key->nr_color_regions = 1;
   for (int i = 0; i < MAX_SAMPLERS; i++)
      wm_key->tex.swizzles[i] = SWIZZLE_XYZW;
}

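/* Type-size callback for the nir_lower_io() call below.  BLORP uniforms are
 * restricted to 32-bit scalars and vectors, so the size is simply four times
 * the component count.
 */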
static int
nir_uniform_type_size(const struct glsl_type *type)
{
   /* Only very basic types are allowed */
   assert(glsl_type_is_vector_or_scalar(type));
   assert(glsl_get_bit_size(type) == 32);

   return glsl_get_vector_elements(type) * 4;
}

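/**
 * Compile a BLORP fragment shader from NIR.  The shader is preprocessed and
 * its uniforms are lowered here, then it is handed to brw_compile_fs(); the
 * relevant pieces of the resulting brw_wm_prog_data are copied into the
 * brw_blorp_prog_data provided by the caller.
 */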
const unsigned *
brw_blorp_compile_nir_shader(struct brw_context *brw, struct nir_shader *nir,
                             const struct brw_wm_prog_key *wm_key,
                             bool use_repclear,
                             struct brw_blorp_prog_data *prog_data,
                             unsigned *program_size)
{
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   void *mem_ctx = ralloc_context(NULL);

   /* Calling brw_preprocess_nir and friends is destructive and, if cloning is
    * enabled, may end up completely replacing the nir_shader. Therefore, we
    * own it and might as well put it in our context for easy cleanup.
    */
   ralloc_steal(mem_ctx, nir);
   nir->options =
      compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions;

   struct brw_wm_prog_data wm_prog_data;
   memset(&wm_prog_data, 0, sizeof(wm_prog_data));

   /* We set up the params array but instead of making them point at actual
    * GL constant values, they just store an index. This is just fine as the
    * backend compiler never looks at the contents of the pointers, it just
    * re-arranges them for us.
    */
   const union gl_constant_value *param[BRW_BLORP_NUM_PUSH_CONSTANT_DWORDS];
   for (unsigned i = 0; i < ARRAY_SIZE(param); i++)
      param[i] = (const union gl_constant_value *)(intptr_t)i;

   wm_prog_data.base.nr_params = BRW_BLORP_NUM_PUSH_CONSTANT_DWORDS;
   wm_prog_data.base.param = param;

   /* BLORP always just uses the first two binding table entries */
   wm_prog_data.binding_table.render_target_start = 0;
   wm_prog_data.base.binding_table.texture_start = 1;

   nir = brw_preprocess_nir(compiler, nir);
   nir_remove_dead_variables(nir, nir_var_shader_in);
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir)->impl);

   /* Uniforms are required to be lowered before going into compile_fs. For
    * BLORP, we'll assume that whoever builds the shader sets the location
    * they want so we just need to lower them and figure out how many we have
    * in total.
    */
   nir->num_uniforms = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      var->data.driver_location = var->data.location;
      unsigned end = var->data.location + nir_uniform_type_size(var->type);
      nir->num_uniforms = MAX2(nir->num_uniforms, end);
   }
   nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size);

   const unsigned *program =
      brw_compile_fs(compiler, brw, mem_ctx, wm_key, &wm_prog_data, nir,
                     NULL, -1, -1, false, use_repclear, program_size, NULL);

   /* Copy the relevant bits of wm_prog_data over into the blorp prog data */
   prog_data->dispatch_8 = wm_prog_data.dispatch_8;
   prog_data->dispatch_16 = wm_prog_data.dispatch_16;
   prog_data->first_curbe_grf_0 = wm_prog_data.base.dispatch_grf_start_reg;
   prog_data->first_curbe_grf_2 = wm_prog_data.dispatch_grf_start_reg_2;
   prog_data->ksp_offset_2 = wm_prog_data.prog_offset_2;
   prog_data->persample_msaa_dispatch = wm_prog_data.persample_dispatch;

   prog_data->nr_params = wm_prog_data.base.nr_params;
   for (unsigned i = 0; i < ARRAY_SIZE(param); i++)
      prog_data->param[i] = (uintptr_t)wm_prog_data.base.param[i];

   return program;
}

/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
void
intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   const char *opname = NULL;

   switch (op) {
   case GEN6_HIZ_OP_DEPTH_RESOLVE:
      opname = "depth resolve";
      break;
   case GEN6_HIZ_OP_HIZ_RESOLVE:
      opname = "hiz ambiguate";
      break;
   case GEN6_HIZ_OP_DEPTH_CLEAR:
      opname = "depth clear";
      break;
   case GEN6_HIZ_OP_NONE:
      opname = "noop?";
      break;
   }

   DBG("%s %s to mt %p level %d layer %d\n",
       __func__, opname, mt, level, layer);

   if (brw->gen >= 8) {
      gen8_hiz_exec(brw, mt, level, layer, op);
   } else {
      gen6_blorp_hiz_exec(brw, mt, level, layer, op);
   }
}

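/**
 * Execute a BLORP operation.  Flushes caches around the operation, emits the
 * generation-specific state and primitive, and asserts that the commands fit
 * in the space reserved in the batch.  If the aperture check fails afterwards,
 * the batch is rolled back, flushed, and the operation is retried once on an
 * empty batch.  Finally, GL state tracking is invalidated via BRW_NEW_BLORP,
 * since BLORP has clobbered the normal 3D pipeline state.
 */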
void
brw_blorp_exec(struct brw_context *brw, const struct brw_blorp_params *params)
{
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = brw->gen >= 8 ? 1800 : 1500;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches. We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source. Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   brw_emit_mi_flush(brw);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);

retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   drm_intel_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

   switch (brw->gen) {
   case 6:
      gen6_blorp_exec(brw, params);
      break;
   case 7:
      gen7_blorp_exec(brw, params);
      break;
   case 8:
   case 9:
      gen8_blorp_exec(brw, params);
      break;
   default:
      /* BLORP is not supported before Gen6. */
      unreachable("not reached");
   }

   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later. If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = false;
   brw->ib.type = -1;

   /* Flush the sampler cache so any texturing from the destination is
    * coherent.
    */
   brw_emit_mi_flush(brw);
}

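/**
 * Emit a HiZ operation (clear, resolve, or ambiguate) on one slice of a
 * depth miptree using BLORP.  Used for Gen6 and Gen7; Gen8+ goes through
 * gen8_hiz_exec() instead (see intel_hiz_exec() above).
 */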
void
gen6_blorp_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
                    unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   struct brw_blorp_params params;
   brw_blorp_params_init(&params);

   params.hiz_op = op;

   brw_blorp_surface_info_init(brw, &params.depth, mt, level, layer,
                               mt->format, true);

   /* Align the rectangle primitive to 8x4 pixels.
    *
    * During fast depth clears, the emitted rectangle primitive must be
    * aligned to 8x4 pixels. From the Ivybridge PRM, Vol 2 Part 1 Section
    * 11.5.3.1 Depth Buffer Clear (and the matching section in the Sandybridge
    * PRM):
    *
    *     If Number of Multisamples is NUMSAMPLES_1, the rectangle must be
    *     aligned to an 8x4 pixel block relative to the upper left corner
    *     of the depth buffer [...]
    *
    * For hiz resolves, the rectangle must also be 8x4 aligned. Item
    * WaHizAmbiguate8x4Aligned from the Haswell workarounds page and the
    * Ivybridge simulator require the alignment.
    *
    * To be safe, let's just align the rect for all hiz operations and all
    * hardware generations.
    *
    * However, for some miptree slices of a Z24 texture, emitting an 8x4
    * aligned rectangle that covers the slice may clobber adjacent slices if
    * we strictly adhered to the texture alignments specified in the PRM. The
    * Ivybridge PRM, Section "Alignment Unit Size", states that
    * SURFACE_STATE.Surface_Horizontal_Alignment should be 4 for Z24 surfaces,
    * not 8. But commit 1f112cc increased the alignment from 4 to 8, which
    * prevents the clobbering.
    */
   params.dst.num_samples = mt->num_samples;
   if (params.dst.num_samples > 1) {
      params.depth.width = ALIGN(mt->logical_width0, 8);
      params.depth.height = ALIGN(mt->logical_height0, 4);
   } else {
      params.depth.width = ALIGN(params.depth.width, 8);
      params.depth.height = ALIGN(params.depth.height, 4);
   }

   params.x1 = params.depth.width;
   params.y1 = params.depth.height;

   assert(intel_miptree_level_has_hiz(mt, level));

   switch (mt->format) {
   case MESA_FORMAT_Z_UNORM16:
      params.depth_format = BRW_DEPTHFORMAT_D16_UNORM;
      break;
   case MESA_FORMAT_Z_FLOAT32:
      params.depth_format = BRW_DEPTHFORMAT_D32_FLOAT;
      break;
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      params.depth_format = BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
      break;
   default:
      unreachable("not reached");
   }

   brw_blorp_exec(brw, &params);
}