Revert "i965/gen9: Disable MCS for 1x color surfaces"
[mesa.git] src/mesa/drivers/dri/i965/intel_mipmap_tree.c
/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"
#include "brw_state.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static void *intel_miptree_map_raw(struct brw_context *brw,
                                   struct intel_mipmap_tree *mt);

static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples);

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, mesa_format format,
                    bool disable_aux_buffers)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
         return INTEL_MSAA_LAYOUT_UMS;
      } else if (disable_aux_buffers) {
         /* We can't use the CMS layout because it uses an aux buffer, the MCS
          * buffer.  So fall back to UMS, which is identical to CMS without the
          * MCS. */
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
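
/* For illustration (a summary of the function above, not from the PRM):
 * on Gen7, a 4x GL_RGBA8 color surface gets INTEL_MSAA_LAYOUT_CMS, a 4x
 * GL_RGBA32I surface falls back to UMS because of the SINT restriction,
 * and a 4x depth or stencil surface uses IMS on every generation.
 */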


/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation.  Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear.  X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation.  X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->tiling) {
   default:
      unreachable("Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
   }
}
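
/* Worked example (illustrative, derived from the table above): for a
 * Y-tiled 32 bpp surface (mt->cpp == 4), *width_px = 32 / 4 = 8 pixels
 * and *height = 4 lines, matching the 32 bpp TiledY row.  A fast clear
 * on such a surface must then cover multiples of 16 * 8 = 128 pixels
 * horizontally and 32 * 4 = 128 lines vertically.
 */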

static bool
intel_tiling_supports_non_msrt_mcs(struct brw_context *brw, unsigned tiling)
{
   /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
    * Target(s)", beneath the "Fast Color Clear" bullet (p326):
    *
    *     - Support is limited to tiled render targets.
    *
    * Gen9 changes the restriction to Y-tile only.
    */
   if (brw->gen >= 9)
      return tiling == I915_TILING_Y;
   else if (brw->gen >= 7)
      return tiling != I915_TILING_NONE;
   else
      return false;
}

/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used.  This doesn't (and should not) inspect any of the properties of
 * the miptree's BO.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 *
 * From the Skylake documentation, it is made clear that X-tiling is no longer
 * supported:
 *
 *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
 *       non-MSRTs only.
 */
static bool
intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
                                           struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7)
      return false;

   if (mt->disable_aux_buffers)
      return false;

   /* This function applies only to non-multisampled render targets. */
   if (mt->num_samples > 1)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0) {
      if (brw->gen >= 8) {
         perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
                    mt->logical_width0, mt->logical_height0, mt->last_level);
      }

      return false;
   }

   /* Check for layered surfaces. */
   if (mt->physical_depth0 != 1) {
      /* Multisample surfaces with the CMS layout are not layered surfaces,
       * yet still have physical_depth0 > 1.  Assert that we don't
       * accidentally reject a multisampled surface here.  We should have
       * rejected it earlier by explicitly checking the sample count.
       */
      assert(mt->num_samples <= 1);

      if (brw->gen >= 8) {
         perf_debug("Layered fast clear - giving up. (%dx%dx%d)\n",
                    mt->logical_width0, mt->logical_height0,
                    mt->physical_depth0);
      }

      return false;
   }

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   if (brw->gen >= 9) {
      const uint32_t brw_format = brw_format_for_mesa_format(mt->format);
      return brw_losslessly_compressible_format(brw, brw_format);
   } else
      return true;
}


/**
 * Determine depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format)
{
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}


/**
 * If MIPTREE_LAYOUT_FOR_BO is set in \c layout_flags, the caller is
 * intel_miptree_create_for_bo() and we must not create \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            GLuint num_samples,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, depth0, mt);

   if (target == GL_TEXTURE_1D_ARRAY) {
      /* For a 1D Array texture the OpenGL API will treat the height0
       * parameter as the number of array slices.  For Intel hardware, we
       * treat the 1D array as a 2D Array with a height of 1.
       *
       * So, when we first come through this path to create a 1D Array
       * texture, height0 stores the number of slices, and depth0 is 1.  In
       * this case, we want to swap height0 and depth0.
       *
       * Since some miptrees will be created based on the base miptree, we may
       * come through this path and see height0 as 1 and depth0 being the
       * number of slices.  In this case we don't need to do the swap.
       */
      assert(height0 == 1 || depth0 == 1);
      if (height0 > 1) {
         depth0 = height0;
         height0 = 1;
      }
   }

   mt->target = target;
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
   exec_list_make_empty(&mt->hiz_map);
   mt->cpp = _mesa_get_format_bytes(format);
   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format,
                                            mt->disable_aux_buffers);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
          * "If the surface is multisampled and it is a depth or stencil
          *  surface or Multisampled Surface StorageFormat in SURFACE_STATE is
          *  MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
          *  proceeding:
          *
          *  +------------------+----------------------+----------------------+
          *  | Num Multisamples |         W_l =        |         H_l =        |
          *  +------------------+----------------------+----------------------+
          *  |         2        | ceiling(W_l / 2) * 4 | H_l (no adjustment)  |
          *  |         4        | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
          *  |         8        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
          *  |        16        | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
          *  +------------------+----------------------+----------------------+
          * "
          *
          * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
          * format rather than UMS/CMS (array slices).  The Sandybridge PRM,
          * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
          *
          * Another more complicated explanation for these adjustments comes
          * from the Sandybridge PRM, volume 4, part 1, page 31:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state.  Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
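         /* Worked example (illustrative, not from the PRM): for a 5x3
          * 4x-MSAA IMS surface, the switch below computes
          * width0 = ALIGN(5, 2) * 2 = 12 and height0 = ALIGN(3, 2) * 2 = 8;
          * the odd dimensions are rounded up to even before doubling so
          * that sample 3's bottom-right 2x2 block fits.
          */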
         switch (num_samples) {
         case 2:
            assert(brw->gen >= 8);
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2);
            break;
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 16:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 4;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 2, 4, 8
             * or 16.
             */
            unreachable("not reached");
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }

   /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
    * be used.  array_spacing_lod0 is only used for non-IMS MSAA surfaces on
    * Gen 7 and 8.  On Gen 8 and 9 this layout is not available as a hardware
    * setting, but we still use it on Gen8 so that the layout code picks a
    * qpitch value which doesn't include space for the mipmaps.  On Gen9 this
    * is not necessary because the hardware will automatically pick a packed
    * qpitch value whenever mt->first_level == mt->last_level.
    * TODO: can we use it elsewhere?
    * TODO: also disable this on Gen8 and pick the qpitch value like Gen9
    */
   if (brw->gen >= 9) {
      mt->array_layout = ALL_LOD_IN_EACH_SLICE;
   } else {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         mt->array_layout = ALL_LOD_IN_EACH_SLICE;
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         mt->array_layout = ALL_SLICES_AT_EACH_LOD;
         break;
      }
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil &&
         intel_miptree_wants_hiz_buffer(brw, mt)))) {
      uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
      if (brw->gen == 6) {
         stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
                          MIPTREE_LAYOUT_TILING_ANY;
      }

      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S_UINT8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            num_samples,
                                            stencil_flags);

      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth
       * buffer.
       */
      mt->format = intel_depth_format_for_depthstencil_format(mt->format);
      mt->cpp = 4;

      if (format == mt->format) {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
      mt->array_layout = ALL_SLICES_AT_EACH_LOD;

   /*
    * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
    * multisampled or have an AUX buffer attached to it.
    *
    * GEN  |    MSRT        | AUX_CCS_* or AUX_MCS
    *  -------------------------------------------
    *  9   |  HALIGN_16     |    HALIGN_16
    *  8   |  HALIGN_ANY    |    HALIGN_16
    *  7   |      ?         |        ?
    *  6   |      ?         |        ?
    */
   if (intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
         layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else if (brw->gen >= 9 && num_samples > 1) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else {
      /* For now, nothing else has this requirement */
      assert((layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
   }

   brw_miptree_layout(brw, mt, layout_flags);

   if (mt->disable_aux_buffers)
      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS);

   return mt;
}


/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms,
    * they are supported natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non ETC1 / ETC2 format */
      return format;
   }
}

/* This function computes Yf/Ys tiled bo size, alignment and pitch. */
static unsigned long
intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment,
                        unsigned long *pitch)
{
   uint32_t tile_width, tile_height;
   unsigned long stride, size, aligned_y;

   assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE);
   intel_get_tile_dims(mt->tiling, mt->tr_mode, mt->cpp,
                       &tile_width, &tile_height);

   aligned_y = ALIGN(mt->total_height, tile_height);
   stride = mt->total_width * mt->cpp;
   stride = ALIGN(stride, tile_width);
   size = stride * aligned_y;

   if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) {
      assert(size % 4096 == 0);
      *alignment = 4096;
   } else {
      assert(size % (64 * 1024) == 0);
      *alignment = 64 * 1024;
   }
   *pitch = stride;
   return size;
}
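
/* Sketch of the arithmetic above (illustrative numbers, not a real
 * allocation): a 256x256 Yf-tiled miptree with cpp == 4 gets
 * tile_width == 128 bytes and tile_height == 32 rows from
 * intel_get_tile_dims(), so stride = ALIGN(256 * 4, 128) = 1024 bytes,
 * aligned_y = ALIGN(256, 32) = 256 rows, and size = 256 KB, which
 * satisfies the 4 KB alignment assert for TRMODE_YF.
 */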

struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     GLuint num_samples,
                     uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;
   uint32_t alloc_flags = 0;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0, num_samples,
                                    layout_flags);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S_UINT8) {
      /* Align to size of W tile, 64x64. */
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   bool y_or_x = false;

   if (mt->tiling == (I915_TILING_Y | I915_TILING_X)) {
      y_or_x = true;
      mt->tiling = I915_TILING_Y;
   }

   if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
      alloc_flags |= BO_ALLOC_FOR_RENDER;

   unsigned long pitch;
   mt->etc_format = etc_format;

   if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
      unsigned alignment = 0;
      unsigned long size;
      size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch);
      assert(size);
      mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree",
                                             size, alignment);
   } else {
      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                        total_width, total_height, mt->cpp,
                                        &mt->tiling, &pitch,
                                        alloc_flags);
   }

   mt->pitch = pitch;

   /* If the BO is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  Prior to Sandybridge, the BLT paths can't
    * handle Y-tiling, so we need to fall back to X.
    */
   if (brw->gen < 6 && y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);

      mt->tiling = I915_TILING_X;
      drm_intel_bo_unreference(mt->bo);
      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                        total_width, total_height, mt->cpp,
                                        &mt->tiling, &pitch, alloc_flags);
      mt->pitch = pitch;
   }

   mt->offset = 0;

   if (!mt->bo) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      assert(mt->num_samples > 1);
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
      assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1);
   }

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;
   GLenum target;

   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;

   /* The BO already has a tiling format and we shouldn't confuse the lower
    * layers by making it try to find a tiling format again.
    */
   assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0);

   layout_flags |= MIPTREE_LAYOUT_FOR_BO;
   mt = intel_miptree_create_layout(brw, target, format,
                                    0, 0,
                                    width, height, depth, 0,
                                    layout_flags);
   if (!mt)
      return NULL;

   drm_intel_bo_reference(bo);
   mt->bo = bo;
   mt->pitch = pitch;
   mt->offset = offset;
   mt->tiling = tiling;

   return mt;
}

/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         drm_intel_bo *bo,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   int num_samples = rb->NumSamples;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 bo,
                                                 format,
                                                 0,
                                                 width,
                                                 height,
                                                 1,
                                                 pitch,
                                                 0);
   if (!singlesample_mt)
      goto fail;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) {
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
   }

   if (num_samples == 0) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->logical_width0 != width ||
          irb->mt->logical_height0 != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return;

fail:
   intel_miptree_release(&irb->singlesample_mt);
   intel_miptree_release(&irb->mt);
   return;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
   const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                                 MIPTREE_LAYOUT_TILING_ANY;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, num_samples,
                             layout_flags);
   if (!mt)
      goto fail;

   if (intel_miptree_wants_hiz_buffer(brw, mt)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      drm_intel_bo_unreference((*mt)->bo);
      intel_miptree_release(&(*mt)->stencil_mt);
      if ((*mt)->hiz_buf) {
         if ((*mt)->hiz_buf->mt)
            intel_miptree_release(&(*mt)->hiz_buf->mt);
         else
            drm_intel_bo_unreference((*mt)->hiz_buf->bo);
         free((*mt)->hiz_buf);
      }
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}


void
intel_get_image_dims(struct gl_texture_image *image,
                     int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      /* For a 1D Array texture the OpenGL API will treat the image height as
       * the number of array slices.  For Intel hardware, we treat the 1D array
       * as a 2D Array with a height of 1.  So, here we want to swap image
       * height and depth.
       */
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_get_image_dims(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   int level_depth = mt->level[level].depth;
   if (mt->num_samples > 1) {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         level_depth /= mt->num_samples;
         break;
      }
   }

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != minify(mt->logical_width0, level - mt->first_level) ||
       height != minify(mt->logical_height0, level - mt->first_level) ||
       depth != level_depth) {
      return false;
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y, GLuint d)
{
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
       level, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}


/**
 * This function computes the tile_w (in bytes) and tile_h (in rows) of
 * different tiling patterns.  If the BO is untiled, tile_w is set to cpp
 * and tile_h is set to 1.
 */
void
intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                    uint32_t *tile_w, uint32_t *tile_h)
{
   if (tr_mode == INTEL_MIPTREE_TRMODE_NONE) {
      switch (tiling) {
      case I915_TILING_X:
         *tile_w = 512;
         *tile_h = 8;
         break;
      case I915_TILING_Y:
         *tile_w = 128;
         *tile_h = 32;
         break;
      case I915_TILING_NONE:
         *tile_w = cpp;
         *tile_h = 1;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      uint32_t aspect_ratio = 1;
      assert(_mesa_is_pow_two(cpp));

      switch (cpp) {
      case 1:
         *tile_h = 64;
         break;
      case 2:
      case 4:
         *tile_h = 32;
         break;
      case 8:
      case 16:
         *tile_h = 16;
         break;
      default:
         unreachable("not reached");
      }

      if (cpp == 2 || cpp == 8)
         aspect_ratio = 2;

      if (tr_mode == INTEL_MIPTREE_TRMODE_YS)
         *tile_h *= 4;

      *tile_w = *tile_h * aspect_ratio * cpp;
   }
}
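
/* Sanity-check examples for the Yf/Ys branch above (illustrative): with
 * cpp == 4 and TRMODE_YF, tile_h = 32 rows and tile_w = 32 * 1 * 4 = 128
 * bytes, i.e. a 4 KB tile.  With cpp == 4 and TRMODE_YS, tile_h becomes
 * 32 * 4 = 128 rows and tile_w = 128 * 1 * 4 = 512 bytes, a 64 KB tile.
 */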


/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile.  If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                     bool map_stencil_as_y_tiled,
                     uint32_t *mask_x, uint32_t *mask_y)
{
   uint32_t tile_w_bytes, tile_h;

   if (map_stencil_as_y_tiled)
      tiling = I915_TILING_Y;

   intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h);

   *mask_x = tile_w_bytes / cpp - 1;
   *mask_y = tile_h - 1;
}
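
/* Example (illustrative): for a Y-tiled surface with cpp == 4 and
 * TRMODE_NONE, intel_get_tile_dims() returns tile_w_bytes == 128 and
 * tile_h == 32, so *mask_x == 128 / 4 - 1 == 31 and *mask_y == 31;
 * the low five bits of each coordinate select the pixel within a tile.
 */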

/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate.  For tiled BOs, caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y,
                                 bool map_stencil_as_y_tiled)
{
   int cpp = mt->cpp;
   uint32_t pitch = mt->pitch;
   uint32_t tiling = mt->tiling;

   if (map_stencil_as_y_tiled) {
      tiling = I915_TILING_Y;

      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
       * gets transformed into a 32-high Y-tile.  Accordingly, the pitch of
       * the resulting surface is twice the pitch of the original miptree,
       * since each row in the Y-tiled view corresponds to two rows in the
       * actual W-tiled surface.  So we need to correct the pitch before
       * computing the offsets.
       */
      pitch *= 2;
   }

   switch (tiling) {
   default:
      unreachable("not reached");
   case I915_TILING_NONE:
      return y * pitch + x * cpp;
   case I915_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
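
/* Worked example (illustrative): for a Y-tiled miptree with cpp == 4 and
 * pitch == 4096 bytes, the tile-aligned coordinate (x, y) == (64, 32)
 * yields 32 * 4096 + 64 / (128 / 4) * 4096 = 131072 + 8192 = 139264
 * bytes: one 32-row band of tiles down plus two 4 KB tiles across.
 */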

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, false,
                        &mask_x, &mask_y);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
}

static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   ptrdiff_t src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too.  We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}

static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = minify(src_mt->physical_width0,
                           level - src_mt->first_level);
   uint32_t height = minify(src_mt->physical_height0,
                            level - src_mt->first_level);
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      unsigned int i, j;
      _mesa_get_format_block_size(dst_mt->format, &i, &j);
      height = ALIGN_NPOT(height, j) / j;
      width = ALIGN_NPOT(width, i) / i;
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;

   GLuint depth;
   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
      depth = intelImage->base.Base.Height;
   else
      depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   mesa_format format;
   switch (num_samples) {
   case 2:
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R_UNORM8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   case 16:
      /* 64 bits/pixel are required for MCS data when using 16x MSAA (4 bits
       * for each sample).
       */
      format = MESA_FORMAT_RG_UINT32;
      break;
   default:
      unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                              MIPTREE_LAYOUT_TILING_Y;
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->logical_width0,
                                     mt->logical_height0,
                                     mt->logical_depth0,
                                     0 /* num_samples */,
                                     mcs_flags);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
   intel_miptree_unmap_raw(mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;

   return mt->mcs_mt;
}


bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right.  We'll pretend that the format
    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4.  Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const mesa_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;

   /* The Skylake MCS is twice as tall as the Broadwell MCS.
    *
    * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines
    * in the main surface.  In Skylake, it's two bits.  The extra bit
    * doubles the MCS height, not width, because in Skylake the MCS is always
    * Y-tiled.
    */
   if (brw->gen >= 9)
      height_divisor /= 2;

   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                           MIPTREE_LAYOUT_TILING_Y;
   if (brw->gen >= 8) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   }
   mt->mcs_mt = intel_miptree_create(brw,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mcs_width,
                                     mcs_height,
                                     mt->logical_depth0,
                                     0 /* num_samples */,
                                     layout_flags);

   return mt->mcs_mt;
}
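
/* Worked example (illustrative, assuming a Gen8 Y-tiled 32 bpp surface):
 * intel_get_non_msrt_mcs_alignment() gives block_width_px == 8 and
 * block_height == 4, so width_divisor == 32 and height_divisor == 32.
 * A 1920x1080 color buffer then gets an MCS of ALIGN(1920, 32) / 32 == 60
 * by ALIGN(1080, 32) / 32 == 34 "R32" pixels.  On Gen9 the height divisor
 * is halved, doubling the MCS height to 68.
 */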


/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].has_hiz.  Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_level_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level)
{
   assert(mt->hiz_buf);

   if (brw->gen >= 8 || brw->is_haswell) {
      uint32_t width = minify(mt->physical_width0, level);
      uint32_t height = minify(mt->physical_height0, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned.  This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops.  For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((width & 7) || (height & 3))) {
         DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
         return false;
      }
   }

   DBG("mt %p level %d: HiZ enabled\n", mt, level);
   mt->level[level].has_hiz = true;
   return true;
}


/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen7_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   switch (mt->num_samples) {
   case 0:
   case 1:
      break;
   case 2:
   case 4:
      z_width *= 2;
      z_height *= 2;
      break;
   case 8:
      z_width *= 4;
      z_height *= 2;
      break;
   default:
      unreachable("unsupported sample count");
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   if (mt->target == GL_TEXTURE_3D) {
      unsigned H_i = H0;
      unsigned Z_i = Z0;
      hz_height = 0;
      for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
         unsigned h_i = ALIGN(H_i, vertical_align);
         /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
         hz_height += h_i * Z_i;
         H_i = minify(H_i, 1);
         Z_i = minify(Z_i, 1);
      }
      /* HZ_Height =
       *    (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
       */
      hz_height = DIV_ROUND_UP(hz_height, 2);
   } else {
      const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
      } else {
         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
         hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
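
/* Worked example for the non-3D path above (illustrative): a
 * single-sampled, fully mipmapped 512x512 2D depth miptree has H0 = 512,
 * h0 = ALIGN(512, 8) = 512 and h1 = ALIGN(256, 8) = 256, giving
 * hz_qpitch = 512 + 256 + 96 = 864.  With Z0 = 1 that makes
 * hz_height = DIV_ROUND_UP(864, 16) * 8 = 432 rows and
 * hz_width = ALIGN(512, 16) = 512 bytes.
 */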


/**
 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
 * buffer dimensions and allocates a bo for the hiz buffer.
 */
static struct intel_miptree_aux_buffer *
intel_gen8_hiz_buf_create(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   unsigned z_width = mt->logical_width0;
   unsigned z_height = mt->logical_height0;
   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
   unsigned hz_width, hz_height;
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);

   if (!buf)
      return NULL;

   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
    * adjustments required for Z_Height and Z_Width based on multisampling.
    */
   if (brw->gen < 9) {
      switch (mt->num_samples) {
      case 0:
      case 1:
         break;
      case 2:
      case 4:
         z_width *= 2;
         z_height *= 2;
         break;
      case 8:
         z_width *= 4;
         z_height *= 2;
         break;
      default:
         unreachable("unsupported sample count");
      }
   }

   const unsigned vertical_align = 8; /* 'j' in the docs */
   const unsigned H0 = z_height;
   const unsigned h0 = ALIGN(H0, vertical_align);
   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
   const unsigned Z0 = z_depth;

   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
   hz_width = ALIGN(z_width, 16);

   unsigned H_i = H0;
   unsigned Z_i = Z0;
   unsigned sum_h_i = 0;
   unsigned hz_height_3d_sum = 0;
   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
      unsigned i = level - mt->first_level;
      unsigned h_i = ALIGN(H_i, vertical_align);
      /* sum(i=2 to m; h_i) */
      if (i >= 2) {
         sum_h_i += h_i;
      }
      /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height_3d_sum += h_i * Z_i;
      H_i = minify(H_i, 1);
      Z_i = minify(Z_i, 1);
   }
   /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
   buf->qpitch = h0 + MAX2(h1, sum_h_i);

   if (mt->target == GL_TEXTURE_3D) {
      /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
      hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
   } else {
      /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) * 8 * Z_Depth */
      hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
          mt->target == GL_TEXTURE_CUBE_MAP) {
         /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) * 8 * 6 * Z_Depth
          *
          * We can just take our hz_height calculation from above, and
          * multiply by 6 for the cube map and cube map array types.
          */
         hz_height *= 6;
      }
   }

   unsigned long pitch;
   uint32_t tiling = I915_TILING_Y;
   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
                                      hz_width, hz_height, 1,
                                      &tiling, &pitch,
                                      BO_ALLOC_FOR_RENDER);
   if (!buf->bo) {
      free(buf);
      return NULL;
   } else if (tiling != I915_TILING_Y) {
      drm_intel_bo_unreference(buf->bo);
      free(buf);
      return NULL;
   }

   buf->pitch = pitch;

   return buf;
}
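
/* Worked example for the QPitch formula above (illustrative): a fully
 * mipmapped 256x256 depth miptree has h0 = 256, h1 = 128, and
 * sum(i=2 to m; h_i) = 64 + 32 + 16 + 8 + 8 + 8 + 8 = 144, so
 * buf->qpitch = 256 + MAX2(128, 144) = 400 rows.
 */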


static struct intel_miptree_aux_buffer *
intel_hiz_miptree_buf_create(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;

   if (brw->gen == 6)
      layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;

   if (!buf)
      return NULL;

   layout_flags |= MIPTREE_LAYOUT_TILING_ANY;
   buf->mt = intel_miptree_create(brw,
                                  mt->target,
                                  mt->format,
                                  mt->first_level,
                                  mt->last_level,
                                  mt->logical_width0,
                                  mt->logical_height0,
                                  mt->logical_depth0,
                                  mt->num_samples,
                                  layout_flags);
   if (!buf->mt) {
      free(buf);
      return NULL;
   }

   buf->bo = buf->mt->bo;
   buf->pitch = buf->mt->pitch;
   buf->qpitch = buf->mt->qpitch;

   return buf;
}

bool
intel_miptree_wants_hiz_buffer(struct brw_context *brw,
                               struct intel_mipmap_tree *mt)
{
   if (!brw->has_hiz)
      return false;

   if (mt->hiz_buf != NULL)
      return false;

   if (mt->disable_aux_buffers)
      return false;

   switch (mt->format) {
   case MESA_FORMAT_Z_FLOAT32:
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
   case MESA_FORMAT_Z_UNORM16:
      return true;
   default:
      return false;
   }
}
1830
1831 bool
1832 intel_miptree_alloc_hiz(struct brw_context *brw,
1833 struct intel_mipmap_tree *mt)
1834 {
1835 assert(mt->hiz_buf == NULL);
1836 assert(!mt->disable_aux_buffers);
1837
1838 if (brw->gen == 7) {
1839 mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
1840 } else if (brw->gen >= 8) {
1841 mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
1842 } else {
1843 mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
1844 }
1845
1846 if (!mt->hiz_buf)
1847 return false;
1848
1849 /* Mark that all slices need a HiZ resolve. */
1850 for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
1851 if (!intel_miptree_level_enable_hiz(brw, mt, level))
1852 continue;
1853
1854 for (unsigned layer = 0; layer < mt->level[level].depth; ++layer) {
1855 struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
1856 exec_node_init(&m->link);
1857 m->level = level;
1858 m->layer = layer;
1859 m->need = GEN6_HIZ_OP_HIZ_RESOLVE;
1860
1861 exec_list_push_tail(&mt->hiz_map, &m->link);
1862 }
1863 }
1864
1865 return true;
1866 }
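
/* A sketch of the expected caller pattern for the two functions above (a
 * hypothetical composite, not a copy of any single caller in the tree):
 *
 *    if (intel_miptree_wants_hiz_buffer(brw, mt))
 *       intel_miptree_alloc_hiz(brw, mt);
 *
 * After a successful allocation, every HiZ-enabled (level, layer) slice
 * sits in mt->hiz_map with GEN6_HIZ_OP_HIZ_RESOLVE pending, which brings
 * the fresh HiZ buffer in sync with the existing depth data before it is
 * first used.
 */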

/**
 * Does the miptree level have hiz enabled?
 */
bool
intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
{
   intel_miptree_check_level_layer(mt, level, 0);
   return mt->level[level].has_hiz;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   if (!intel_miptree_level_has_hiz(mt, level))
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

void
intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
                                                uint32_t level)
{
   uint32_t layer;
   uint32_t end_layer = mt->level[level].depth;

   for (layer = 0; layer < end_layer; layer++) {
      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
   }
}

static bool
intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(brw, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(brw, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;

   foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
      if (map->need != need)
         continue;

      intel_hiz_exec(brw, mt, map->level, map->layer, need);
      intel_resolve_map_remove(map);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(brw, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}
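
/* How the resolve machinery above is meant to be driven (an illustrative
 * summary of this file's own usage, not new behavior): HiZ rendering makes
 * the depth and HiZ buffers diverge, writers queue an entry in mt->hiz_map,
 * and readers drain it.  The depth-read path, mirroring what
 * intel_miptree_map() does further below, looks like:
 *
 *    intel_miptree_slice_resolve_depth(brw, mt, level, slice);
 *    ... read or write the depth data with the CPU ...
 *    intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
 *
 * where the final call is only needed if the data was written.
 */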


void
intel_miptree_resolve_color(struct brw_context *brw,
                            struct intel_mipmap_tree *mt)
{
   switch (mt->fast_clear_state) {
   case INTEL_FAST_CLEAR_STATE_NO_MCS:
   case INTEL_FAST_CLEAR_STATE_RESOLVED:
      /* No resolve needed */
      break;
   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
   case INTEL_FAST_CLEAR_STATE_CLEAR:
      /* Fast color clear resolves only make sense for non-MSAA buffers. */
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
         brw_meta_resolve_color(brw, mt);
      break;
   }
}
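
/* The fast-clear states above form a small state machine (a summary of how
 * this file uses them, stated with some hedging since the transitions live
 * in the callers): a fast clear moves a miptree to
 * INTEL_FAST_CLEAR_STATE_CLEAR, ordinary rendering over it to
 * INTEL_FAST_CLEAR_STATE_UNRESOLVED, and the resolve above back to
 * INTEL_FAST_CLEAR_STATE_RESOLVED, after which CPU mapping or sharing the
 * BO is safe; INTEL_FAST_CLEAR_STATE_NO_MCS marks surfaces that never get
 * an MCS buffer at all.
 */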


/**
 * Make it possible to share the BO backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the fast_clear_state
 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
 * allocated in the future.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);

   if (mt->mcs_mt) {
      intel_miptree_resolve_color(brw, mt);
      intel_miptree_release(&mt->mcs_mt);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   }
}


/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
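
/* A worked example of the decode above (illustrative coordinates): for
 * x = 3, y = 5 in an unswizzled buffer, tile_x = tile_y = 0 and
 * byte_x = 3, byte_y = 5, so
 *
 *    u = 32 * 1   // (byte_y / 4) % 2 == 1
 *      +  4 * 1   // (byte_x / 2) % 2 == 1
 *      +  2 * 1   // byte_y % 2 == 1
 *      +  1 * 1   // byte_x % 2 == 1
 *      = 39
 *
 * i.e. intel_offset_S8(stride, 3, 5, false) == 39 for any stride, since
 * both tile terms are zero inside the first tile.
 */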

void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst)
{
   if (brw->gen < 8) {
      brw_blorp_blit_miptrees(brw,
                              src, 0 /* level */, 0 /* layer */, src->format,
                              dst, 0 /* level */, 0 /* layer */, dst->format,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   } else if (src->format == MESA_FORMAT_S_UINT8) {
      brw_meta_stencil_updownsample(brw, src, dst);
   } else {
      brw_meta_updownsample(brw, src, dst);
   }

   if (src->stencil_mt) {
      if (brw->gen >= 8) {
         brw_meta_stencil_updownsample(brw, src->stencil_mt, dst);
         return;
      }

      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              src->stencil_mt->format,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt->format,
                              0, 0,
                              src->logical_width0, src->logical_height0,
                              0, 0,
                              dst->logical_width0, dst->logical_height0,
                              GL_NEAREST, false, false /*mirror x, y*/);
   }
}

void *
intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
{
   /* CPU accesses to color buffers don't understand fast color clears, so
    * resolve any pending fast color clears before we map.
    */
   intel_miptree_resolve_color(brw, mt);

   drm_intel_bo *bo = mt->bo;

   if (drm_intel_bo_references(brw->batch.bo, bo))
      intel_batchbuffer_flush(brw);

   if (mt->tiling != I915_TILING_NONE)
      brw_bo_map_gtt(brw, bo, "miptree");
   else
      brw_bo_map(brw, bo, true, "miptree");

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->bo);
}

static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   intptr_t x = map->x;
   intptr_t y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   assert(x % bw == 0);
   y /= bh;
   x /= bw;

   /* Check the raw mapping for failure before applying mt->offset; adding
    * the offset first would turn a NULL return into a bogus non-NULL
    * pointer whenever the offset is non-zero.
    */
   base = intel_miptree_map_raw(brw, mt);

   if (base == NULL)
      map->ptr = NULL;
   else {
      base += mt->offset;

      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) "
       "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
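
/* A concrete illustration of the compressed-format bookkeeping above
 * (hypothetical format parameters): with a 4x4 block-compressed format,
 * a map at map->x = 8, map->y = 12 becomes block coordinates (2, 3), and
 * because map->stride counts bytes per row of blocks, the usual
 * "base + y * stride + x * cpp" arithmetic addresses blocks unchanged.
 */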

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}

static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                         /* first_level */ 0,
                                         /* last_level */ 0,
                                         map->w, map->h, 1,
                                         /* samples */ 0,
                                         MIPTREE_LAYOUT_TILING_NONE);

   if (!map->linear_mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->linear_mt->pitch;

   /* At least one of READ_BIT and WRITE_BIT is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      if (!intel_miptree_blit(brw,
                              mt, level, slice,
                              map->x, map->y, false,
                              map->linear_mt, 0, 0,
                              0, 0, false,
                              map->w, map->h, GL_COPY)) {
         fprintf(stderr, "Failed to blit\n");
         goto fail;
      }
   }

   map->ptr = intel_miptree_map_raw(brw, map->linear_mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->linear_mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(map->linear_mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(brw,
                                   map->linear_mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->linear_mt);
}

/**
 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
 */
#if defined(USE_SSE41)
static void
intel_miptree_map_movntdqa(struct brw_context *brw,
                           struct intel_mipmap_tree *mt,
                           struct intel_miptree_map *map,
                           unsigned int level, unsigned int slice)
{
   assert(map->mode & GL_MAP_READ_BIT);
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   /* Map the original image */
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   image_x += map->x;
   image_y += map->y;

   void *src = intel_miptree_map_raw(brw, mt);
   if (!src)
      return;
   src += image_y * mt->pitch;
   src += image_x * mt->cpp;

   /* Due to the pixel offsets for the particular image being mapped, our
    * src pointer may not be 16-byte aligned.  However, if the pitch is
    * divisible by 16, then the amount by which it's misaligned will remain
    * consistent from row to row.
    */
   assert((mt->pitch % 16) == 0);
   const int misalignment = ((uintptr_t) src) & 15;

   /* Create an untiled temporary buffer for the mapping. */
   const unsigned width_bytes = _mesa_format_row_stride(mt->format, map->w);

   map->stride = ALIGN(misalignment + width_bytes, 16);

   map->buffer = _mesa_align_malloc(map->stride * map->h, 16);
   if (map->buffer == NULL) {
      /* Leave map->ptr NULL so the caller treats this as a failed map. */
      intel_miptree_unmap_raw(mt);
      return;
   }

   /* Offset the destination so it has the same misalignment as src. */
   map->ptr = map->buffer + misalignment;

   assert((((uintptr_t) map->ptr) & 15) == misalignment);

   for (uint32_t y = 0; y < map->h; y++) {
      void *dst_ptr = map->ptr + y * map->stride;
      void *src_ptr = src + y * mt->pitch;

      _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes);
   }

   intel_miptree_unmap_raw(mt);
}
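
/* The alignment bookkeeping above, worked through with illustrative numbers:
 * if src ends up misaligned by 8 bytes and a row is 100 bytes wide, then
 * map->stride = ALIGN(8 + 100, 16) = 112 and map->ptr = map->buffer + 8, so
 * every row copy sees source and destination with the same 16-byte phase,
 * which is what the MOVNTDQA-based streaming memcpy needs to use its
 * aligned loads.
 */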

static void
intel_miptree_unmap_movntdqa(struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             struct intel_miptree_map *map,
                             unsigned int level,
                             unsigned int slice)
{
   _mesa_align_free(map->buffer);
   map->buffer = NULL;
   map->ptr = NULL;
}
#endif

static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* At least one of READ_BIT and WRITE_BIT is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Write back to the same tiled location the map was read from:
             * the image offset has to be included here, just as it is in
             * intel_miptree_map_s8(), or writes to any level/slice other
             * than the first would land at the wrong address.
             */
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(mt);
   }

   free(map->buffer);
}

static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt)
                + image_y * mt->pitch
                + image_x * mt->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(mt);
   free(map->buffer);
}
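
/* The asserts in intel_miptree_map_etc() encode the design constraint here:
 * ETC data is only ever uploaded through this path, never read back, since
 * on hardware without native ETC support the driver keeps the texture
 * decompressed (note the MESA_FORMAT_R8G8B8X8_UNORM storage for ETC1).  The
 * map therefore hands out a scratch buffer laid out in the ETC format, and
 * the unmap above decompresses it into the real RGBX storage in one pass.
 */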

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* At least one of READ_BIT and WRITE_BIT is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
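
/* A quick sanity check of the Z24S8 packing used above (illustrative
 * values): z = 0x00123456 and s = 0xab pack to
 * (0xab << 24) | (0x00123456 & 0x00ffffff) == 0xab123456; unpacking
 * recovers s as packed >> 24, and the low 24 z bits are stored back
 * unchanged, the top byte of the z_map word being the don't-care X8 of
 * MESA_FORMAT_Z24_UNORM_X8_UINT.
 */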

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

static bool
can_blit_slice(struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   if (image_x >= 32768 || image_y >= 32768)
      return false;

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->pitch >= 32768)
      return false;

   return true;
}

static bool
use_intel_miptree_map_blit(struct brw_context *brw,
                           struct intel_mipmap_tree *mt,
                           GLbitfield mode,
                           unsigned int level,
                           unsigned int slice)
{
   if (brw->has_llc &&
       /* It's probably not worth swapping to the blit ring because of
        * all the overhead involved.
        */
       !(mode & GL_MAP_WRITE_BIT) &&
       !mt->compressed &&
       (mt->tiling == I915_TILING_X ||
        /* Prior to Sandybridge, the blitter can't handle Y tiling */
        (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) &&
       can_blit_slice(mt, level, slice))
      return true;

   if (mt->tiling != I915_TILING_NONE &&
       mt->bo->size >= brw->max_gtt_map_object_size) {
      assert(can_blit_slice(mt, level, slice));
      return true;
   }

   return false;
}

/**
 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
 * exceed 32 bits, but to diminish the likelihood of subtle bugs caused by
 * overflow in pointer arithmetic.
 *
 * If you call this function and use \a out_stride, then you're doing pointer
 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
 * bugs.  The caller must still take care to avoid 32-bit overflow errors in
 * all arithmetic expressions that contain buffer offsets and pixel sizes,
 * which usually have type uint32_t or GLuint.
 */
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  ptrdiff_t *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   } else if (use_intel_miptree_map_blit(brw, mt, mode, level, slice)) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (!(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed && cpu_has_sse4_1 &&
              (mt->pitch % 16 == 0)) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
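
/* A sketch of the calling convention for intel_miptree_map()/unmap() (a
 * hypothetical caller; the miptree, level and slice are assumed valid and
 * single-sampled, and "src", "src_stride" and "cpp" are stand-ins):
 *
 *    void *ptr;
 *    ptrdiff_t stride;
 *
 *    intel_miptree_map(brw, mt, level, slice, x, y, w, h,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &ptr, &stride);
 *    if (ptr) {
 *       for (unsigned row = 0; row < h; row++)
 *          memcpy((char *) ptr + row * stride, src + row * src_stride,
 *                 w * cpp);
 *       intel_miptree_unmap(brw, mt, level, slice);
 *    }
 *
 * Row addressing goes through the ptrdiff_t stride, per the comment above,
 * to keep the pointer arithmetic out of 32-bit territory.
 */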

void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->linear_mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (map->buffer && cpu_has_sse4_1) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}