i965/blorp: Store input read mask
[mesa.git] src/mesa/drivers/dri/i965/brw_blorp.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <errno.h>
#include "intel_batchbuffer.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_compiler.h"
#include "brw_nir.h"
#include "brw_state.h"

#define FILE_DEBUG_FLAG DEBUG_BLORP

void
brw_blorp_surface_info_init(struct brw_context *brw,
                            struct brw_blorp_surface_info *info,
                            struct intel_mipmap_tree *mt,
                            unsigned int level, unsigned int layer,
                            mesa_format format, bool is_render_target)
{
   /* Layer is a physical layer, so if this is a 2D multisample array texture
    * using INTEL_MSAA_LAYOUT_UMS or INTEL_MSAA_LAYOUT_CMS, then it had better
    * be a multiple of num_samples.
    */
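   /* (With 4x MSAA in UMS/CMS layout, for example, each logical layer
    * occupies num_samples consecutive physical slices, so only layers
    * 0, 4, 8, ... are expected here.)
    */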
   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      assert(mt->num_samples <= 1 || layer % mt->num_samples == 0);
   }

   intel_miptree_check_level_layer(mt, level, layer);

   info->mt = mt;
   info->level = level;
   info->layer = layer;
   info->width = minify(mt->physical_width0, level - mt->first_level);
   info->height = minify(mt->physical_height0, level - mt->first_level);

   intel_miptree_get_image_offset(mt, level, layer,
                                  &info->x_offset, &info->y_offset);

   info->num_samples = mt->num_samples;
   info->array_layout = mt->array_layout;
   info->map_stencil_as_y_tiled = false;
   info->msaa_layout = mt->msaa_layout;
   info->swizzle = SWIZZLE_XYZW;

   if (format == MESA_FORMAT_NONE)
      format = mt->format;

   switch (format) {
   case MESA_FORMAT_S_UINT8:
      /* The miptree is a W-tiled stencil buffer. Surface states can't be set
       * up for W tiling, so we'll need to use Y tiling and have the WM
       * program swizzle the coordinates.
       */
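      /* (A W tile is 64 bytes x 64 rows while a Y tile is 128 bytes x 32
       * rows, so the blit shaders compensate by re-swizzling pixel
       * addresses; see the coordinate munging in the blorp blit compiler.)
       */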
      info->map_stencil_as_y_tiled = true;
      info->brw_surfaceformat = brw->gen >= 8 ? BRW_SURFACEFORMAT_R8_UINT :
                                                BRW_SURFACEFORMAT_R8_UNORM;
      break;
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      /* It would make sense to use BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS
       * here, but unfortunately it isn't supported as a render target, which
       * would prevent us from blitting to 24-bit depth.
       *
       * The miptree consists of 32 bits per pixel, arranged as 24-bit depth
       * values interleaved with 8 "don't care" bits. Since depth values don't
       * require any blending, it doesn't matter how we interpret the bit
       * pattern as long as we copy the right amount of data, so just map it
       * as 8-bit BGRA.
       */
      info->brw_surfaceformat = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   case MESA_FORMAT_Z_FLOAT32:
      info->brw_surfaceformat = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case MESA_FORMAT_Z_UNORM16:
      info->brw_surfaceformat = BRW_SURFACEFORMAT_R16_UNORM;
      break;
   default: {
      if (is_render_target) {
         assert(brw->format_supported_as_render_target[format]);
         info->brw_surfaceformat = brw->render_target_format[format];
      } else {
         info->brw_surfaceformat = brw_format_for_mesa_format(format);
      }
      break;
   }
   }
}


/**
 * Split x_offset and y_offset into a base offset (in bytes) and a remaining
 * x/y offset (in pixels). Note: we can't do this by calling
 * intel_renderbuffer_tile_offsets(), because the offsets may have been
 * adjusted to account for Y vs. W tiling differences. So we compute it
 * directly from the adjusted offsets.
 */
uint32_t
brw_blorp_compute_tile_offsets(const struct brw_blorp_surface_info *info,
                               uint32_t *tile_x, uint32_t *tile_y)
{
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(info->mt->tiling, info->mt->tr_mode, info->mt->cpp,
                        info->map_stencil_as_y_tiled,
                        &mask_x, &mask_y);

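   /* (As a rough example: with X tiling, whose tiles span 512 bytes by 8
    * rows, and 4 bytes per pixel, the masks come back as mask_x = 127 and
    * mask_y = 7; the low bits stay as a pixel offset and everything above
    * them is folded into the returned byte offset.)
    */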
   *tile_x = info->x_offset & mask_x;
   *tile_y = info->y_offset & mask_y;

   return intel_miptree_get_aligned_offset(info->mt, info->x_offset & ~mask_x,
                                           info->y_offset & ~mask_y,
                                           info->map_stencil_as_y_tiled);
}


void
brw_blorp_params_init(struct brw_blorp_params *params)
{
   memset(params, 0, sizeof(*params));
   params->hiz_op = GEN6_HIZ_OP_NONE;
   params->fast_clear_op = 0;
   params->num_draw_buffers = 1;
   params->num_layers = 1;
}

void
brw_blorp_init_wm_prog_key(struct brw_wm_prog_key *wm_key)
{
   memset(wm_key, 0, sizeof(*wm_key));
   wm_key->nr_color_regions = 1;
   for (int i = 0; i < MAX_SAMPLERS; i++)
      wm_key->tex.swizzles[i] = SWIZZLE_XYZW;
}

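/* Type size callback for the nir_lower_io() call below: every 32-bit
 * component of a uniform counts as 4 units, so a scalar float is 4 and a
 * vec4 is 16.
 */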
static int
nir_uniform_type_size(const struct glsl_type *type)
{
   /* Only very basic types are allowed */
   assert(glsl_type_is_vector_or_scalar(type));
   assert(glsl_get_bit_size(type) == 32);

   return glsl_get_vector_elements(type) * 4;
}

const unsigned *
brw_blorp_compile_nir_shader(struct brw_context *brw, struct nir_shader *nir,
                             const struct brw_wm_prog_key *wm_key,
                             bool use_repclear,
                             struct brw_blorp_prog_data *prog_data,
                             unsigned *program_size)
{
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   void *mem_ctx = ralloc_context(NULL);

   /* Calling brw_preprocess_nir and friends is destructive and, if cloning is
    * enabled, may end up completely replacing the nir_shader. Therefore, we
    * own it and might as well put it in our context for easy cleanup.
    */
   ralloc_steal(mem_ctx, nir);
   nir->options =
      compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions;

   struct brw_wm_prog_data wm_prog_data;
   memset(&wm_prog_data, 0, sizeof(wm_prog_data));

   /* We set up the params array but instead of making them point at actual
    * GL constant values, they just store an index. This is just fine as the
    * backend compiler never looks at the contents of the pointers, it just
    * re-arranges them for us.
    */
   const union gl_constant_value *param[BRW_BLORP_NUM_PUSH_CONSTANT_DWORDS];
   for (unsigned i = 0; i < ARRAY_SIZE(param); i++)
      param[i] = (const union gl_constant_value *)(intptr_t)i;

   wm_prog_data.base.nr_params = BRW_BLORP_NUM_PUSH_CONSTANT_DWORDS;
   wm_prog_data.base.param = param;
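   /* After compilation, wm_prog_data.base.param[] holds these same fake
    * pointers, reordered into the layout the backend wants pushed; casting
    * them back to integers at the end of this function recovers the
    * original uniform indices.
    */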

   /* BLORP always just uses the first two binding table entries */
   wm_prog_data.binding_table.render_target_start = 0;
   wm_prog_data.base.binding_table.texture_start = 1;

   nir = brw_preprocess_nir(compiler, nir);
   nir_remove_dead_variables(nir, nir_var_shader_in);
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir)->impl);

   /* Uniforms are required to be lowered before going into compile_fs. For
    * BLORP, we'll assume that whoever builds the shader sets the location
    * they want so we just need to lower them and figure out how many we have
    * in total.
    */
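   /* (Hypothetical example: a vec4 uniform whose location was set to 8 gets
    * driver_location = 8 and pushes num_uniforms to at least 8 + 16 = 24.)
    */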
   nir->num_uniforms = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      var->data.driver_location = var->data.location;
      unsigned end = var->data.location + nir_uniform_type_size(var->type);
      nir->num_uniforms = MAX2(nir->num_uniforms, end);
   }
   nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size);

   const unsigned *program =
      brw_compile_fs(compiler, brw, mem_ctx, wm_key, &wm_prog_data, nir,
                     NULL, -1, -1, false, use_repclear, program_size, NULL);

   /* Copy the relevant bits of wm_prog_data over into the blorp prog data */
   prog_data->dispatch_8 = wm_prog_data.dispatch_8;
   prog_data->dispatch_16 = wm_prog_data.dispatch_16;
   prog_data->first_curbe_grf_0 = wm_prog_data.base.dispatch_grf_start_reg;
   prog_data->first_curbe_grf_2 = wm_prog_data.dispatch_grf_start_reg_2;
   prog_data->ksp_offset_2 = wm_prog_data.prog_offset_2;
   prog_data->persample_msaa_dispatch = wm_prog_data.persample_dispatch;
   prog_data->flat_inputs = wm_prog_data.flat_inputs;
   prog_data->num_varying_inputs = wm_prog_data.num_varying_inputs;
   prog_data->inputs_read = nir->info.inputs_read;

   prog_data->nr_params = wm_prog_data.base.nr_params;
   for (unsigned i = 0; i < ARRAY_SIZE(param); i++)
      prog_data->param[i] = (uintptr_t)wm_prog_data.base.param[i];

   return program;
}

/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
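/* Roughly speaking: a depth resolve rewrites the depth buffer so that it is
 * consistent with the HiZ buffer (needed before the depth data is read
 * directly), a HiZ resolve ("ambiguate") does the reverse and brings the HiZ
 * buffer back in sync with the depth buffer, and a depth clear uses HiZ to
 * fast-clear the buffer. See the PRM sections listed above for the precise
 * requirements.
 */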
void
intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   const char *opname = NULL;

   switch (op) {
   case GEN6_HIZ_OP_DEPTH_RESOLVE:
      opname = "depth resolve";
      break;
   case GEN6_HIZ_OP_HIZ_RESOLVE:
      opname = "hiz ambiguate";
      break;
   case GEN6_HIZ_OP_DEPTH_CLEAR:
      opname = "depth clear";
      break;
   case GEN6_HIZ_OP_NONE:
      opname = "noop?";
      break;
   }

   DBG("%s %s to mt %p level %d layer %d\n",
       __func__, opname, mt, level, layer);

   if (brw->gen >= 8) {
      gen8_hiz_exec(brw, mt, level, layer, op);
   } else {
      gen6_blorp_hiz_exec(brw, mt, level, layer, op);
   }
}

void
brw_blorp_exec(struct brw_context *brw, const struct brw_blorp_params *params)
{
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = brw->gen >= 8 ? 1800 : 1500;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches. We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source. Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   brw_emit_mi_flush(brw);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);

retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   drm_intel_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

   switch (brw->gen) {
   case 6:
      gen6_blorp_exec(brw, params);
      break;
   case 7:
      gen7_blorp_exec(brw, params);
      break;
   case 8:
   case 9:
      gen8_blorp_exec(brw, params);
      break;
   default:
      /* BLORP is not supported before Gen6. */
      unreachable("not reached");
   }

   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
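   /* (Batch commands accumulate upward in dwords, hence the * 4 below, while
    * indirect state is allocated downward from the end of the batch buffer,
    * so the two deltas together approximate the space this blorp op
    * consumed.)
    */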
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later. If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = false;
   brw->ib.type = -1;

   /* Flush the sampler cache so any texturing from the destination is
    * coherent.
    */
   brw_emit_mi_flush(brw);
}

void
gen6_blorp_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
                    unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   struct brw_blorp_params params;
   brw_blorp_params_init(&params);

   params.hiz_op = op;

   brw_blorp_surface_info_init(brw, &params.depth, mt, level, layer,
                               mt->format, true);

   /* Align the rectangle primitive to 8x4 pixels.
    *
    * During fast depth clears, the emitted rectangle primitive must be
    * aligned to 8x4 pixels. From the Ivybridge PRM, Vol 2 Part 1 Section
    * 11.5.3.1 Depth Buffer Clear (and the matching section in the Sandybridge
    * PRM):
    *     If Number of Multisamples is NUMSAMPLES_1, the rectangle must be
    *     aligned to an 8x4 pixel block relative to the upper left corner
    *     of the depth buffer [...]
    *
    * For hiz resolves, the rectangle must also be 8x4 aligned. Item
    * WaHizAmbiguate8x4Aligned from the Haswell workarounds page and the
    * Ivybridge simulator require the alignment.
    *
    * To be safe, let's just align the rect for all hiz operations and all
    * hardware generations.
    *
    * However, for some miptree slices of a Z24 texture, emitting an 8x4
    * aligned rectangle that covers the slice may clobber adjacent slices if
    * we strictly adhered to the texture alignments specified in the PRM. The
    * Ivybridge PRM, Section "Alignment Unit Size", states that
    * SURFACE_STATE.Surface_Horizontal_Alignment should be 4 for Z24 surfaces,
    * not 8. But commit 1f112cc increased the alignment from 4 to 8, which
    * prevents the clobbering.
    */
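   /* (E.g. a 131x67 single-sampled slice is covered by a 136x68 rectangle.) */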
   params.dst.num_samples = mt->num_samples;
   if (params.dst.num_samples > 1) {
      params.depth.width = ALIGN(mt->logical_width0, 8);
      params.depth.height = ALIGN(mt->logical_height0, 4);
   } else {
      params.depth.width = ALIGN(params.depth.width, 8);
      params.depth.height = ALIGN(params.depth.height, 4);
   }

   params.x1 = params.depth.width;
   params.y1 = params.depth.height;

   assert(intel_miptree_level_has_hiz(mt, level));

   switch (mt->format) {
   case MESA_FORMAT_Z_UNORM16:
      params.depth_format = BRW_DEPTHFORMAT_D16_UNORM;
      break;
   case MESA_FORMAT_Z_FLOAT32:
      params.depth_format = BRW_DEPTHFORMAT_D32_FLOAT;
      break;
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      params.depth_format = BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
      break;
   default:
      unreachable("not reached");
   }

   brw_blorp_exec(brw, &params);
}