src/mesa/drivers/dri/i965/intel_mipmap_tree.c
/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_resolve_map.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"
#include "brw_state.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static void *intel_miptree_map_raw(struct brw_context *brw,
                                   struct intel_mipmap_tree *mt);

static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples);

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, mesa_format format,
                    bool disable_aux_buffers)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *     This field must be set to 0 for all SINT MSRTs when all RT
       *     channels are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers. The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
         return INTEL_MSAA_LAYOUT_UMS;
      } else if (disable_aux_buffers) {
         /* We can't use the CMS layout because it uses an aux buffer, the
          * MCS buffer. So fall back to UMS, which is identical to CMS
          * without the MCS.
          */
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}


/**
 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
 * scaled-down bitfield representation of the color buffer which is capable of
 * recording when blocks of the color buffer are equal to the clear value.
 * This function returns the block size that will be used by the MCS buffer
 * corresponding to a certain color miptree.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p327):
 *
 *     The following table describes the RT alignment
 *
 *                       Pixels  Lines
 *         TiledY RT CL
 *             bpp
 *              32          8      4
 *              64          4      4
 *             128          2      4
 *         TiledX RT CL
 *             bpp
 *              32         16      2
 *              64          8      2
 *             128          4      2
 *
 * This alignment has the following uses:
 *
 * - For figuring out the size of the MCS buffer. Each 4k tile in the MCS
 *   buffer contains 128 blocks horizontally and 256 blocks vertically.
 *
 * - For figuring out alignment restrictions for a fast clear operation. Fast
 *   clear operations must always clear aligned multiples of 16 blocks
 *   horizontally and 32 blocks vertically.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a fast clear. X coordinates must be scaled down by 8 times the block
 *   width, and Y coordinates by 16 times the block height.
 *
 * - For scaling down the coordinates sent through the render pipeline during
 *   a "Render Target Resolve" operation. X coordinates must be scaled down
 *   by half the block width, and Y coordinates by half the block height.
 */
void
intel_get_non_msrt_mcs_alignment(const struct intel_mipmap_tree *mt,
                                 unsigned *width_px, unsigned *height)
{
   switch (mt->tiling) {
   default:
      unreachable("Non-MSRT MCS requires X or Y tiling");
      /* In release builds, fall through */
   case I915_TILING_Y:
      *width_px = 32 / mt->cpp;
      *height = 4;
      break;
   case I915_TILING_X:
      *width_px = 64 / mt->cpp;
      *height = 2;
   }
}
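
/* Worked example (illustrative): for a 32bpp (cpp = 4) Y-tiled render
 * target, the TiledY row of the table above applies, so this function
 * returns a block size of *width_px = 32 / 4 = 8 pixels by *height = 4
 * lines.
 */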

bool
intel_tiling_supports_non_msrt_mcs(const struct brw_context *brw,
                                   unsigned tiling)
{
   /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
    * Target(s)", beneath the "Fast Color Clear" bullet (p326):
    *
    *     - Support is limited to tiled render targets.
    *
    * Gen9 changes the restriction to Y-tile only.
    */
   if (brw->gen >= 9)
      return tiling == I915_TILING_Y;
   else if (brw->gen >= 7)
      return tiling != I915_TILING_NONE;
   else
      return false;
}

/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used. This doesn't (and should not) inspect any of the properties of
 * the miptree's BO.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 *
 * From the Skylake documentation, it is made clear that X-tiling is no longer
 * supported:
 *
 *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
 *       non-MSRTs only.
 */
bool
intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
                                           const struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7)
      return false;

   if (mt->disable_aux_buffers)
      return false;

   /* This function applies only to non-multisampled render targets. */
   if (mt->num_samples > 1)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;
   if (mt->first_level != 0 || mt->last_level != 0) {
      if (brw->gen >= 8) {
         perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
                    mt->logical_width0, mt->logical_height0, mt->last_level);
      }

      return false;
   }

   /* Check for layered surfaces. */
   if (mt->physical_depth0 != 1) {
      /* Multisample surfaces with the CMS layout are not layered surfaces,
       * yet still have physical_depth0 > 1. Assert that we don't
       * accidentally reject a multisampled surface here. We should have
       * rejected it earlier by explicitly checking the sample count.
       */
      assert(mt->num_samples <= 1);

      if (brw->gen >= 8) {
248 perf_debug("Layered fast clear - giving up. (%dx%d%d)\n",
                    mt->logical_width0, mt->logical_height0,
                    mt->physical_depth0);
      }

      return false;
   }

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->format_supported_as_render_target[mt->format])
      return false;

   if (brw->gen >= 9) {
      mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
      const uint32_t brw_format = brw_format_for_mesa_format(linear_format);
      return brw_losslessly_compressible_format(brw, brw_format);
   } else
      return true;
}

/* On Gen9, support for color buffer compression was extended to single-
 * sampled surfaces. This helper considers both the auxiliary buffer type
 * and the number of samples to tell whether the given miptree represents
 * the new single-sampled case - also called lossless compression.
 */
bool
intel_miptree_is_lossless_compressed(const struct brw_context *brw,
                                     const struct intel_mipmap_tree *mt)
{
   /* Only available from Gen9 onwards. */
   if (brw->gen < 9)
      return false;

   /* Compression always requires an auxiliary buffer. */
   if (!mt->mcs_mt)
      return false;

   /* Single-sample compression is represented by re-using the MSAA
    * compression layout type: "Compressed Multisampled Surfaces".
    */
   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS)
      return false;

   /* And finally distinguish between the MSAA and single-sample cases. */
   return mt->num_samples <= 1;
}

bool
intel_miptree_supports_lossless_compressed(struct brw_context *brw,
                                           const struct intel_mipmap_tree *mt)
{
   /* For now, compression is only enabled for integer formats, even though
    * some floating point formats are also supported. This is a heuristic
    * decision based on current public benchmarks: in none of the measured
    * cases did these formats provide any improvement, and a few cases were
    * seen to regress. Hence they are left to be enabled in the future, once
    * they are known to improve things.
    */
   if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
      return false;

   /* The fast clear mechanism and lossless compression go hand in hand. */
   if (!intel_miptree_supports_non_msrt_fast_clear(brw, mt))
      return false;

   /* Fast clear can also be used to clear sRGB surfaces by using the
    * equivalent linear format. This trick, however, can't be extended to
    * lossless compression, so we need to check that the format really is
    * linear.
    */
   return _mesa_get_srgb_format_linear(mt->format) == mt->format;
}

/**
 * Determine depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format)
{
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}


/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_layout(struct brw_context *brw,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            GLuint num_samples,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, depth0, mt);

   if (target == GL_TEXTURE_1D_ARRAY) {
      /* For a 1D Array texture the OpenGL API will treat the height0
       * parameter as the number of array slices. For Intel hardware, we treat
       * the 1D array as a 2D Array with a height of 1.
       *
       * So, when we first come through this path to create a 1D Array
       * texture, height0 stores the number of slices, and depth0 is 1. In
       * this case, we want to swap height0 and depth0.
       *
       * Since some miptrees will be created based on the base miptree, we may
       * come through this path and see height0 as 1 and depth0 being the
       * number of slices. In this case we don't need to do the swap.
       */
      assert(height0 == 1 || depth0 == 1);
      if (height0 > 1) {
         depth0 = height0;
         height0 = 1;
      }
   }

   mt->target = target;
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
   mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
   mt->is_scanout = (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT) != 0;
   exec_list_make_empty(&mt->hiz_map);
   mt->cpp = _mesa_get_format_bytes(format);
   mt->num_samples = num_samples;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   mt->refcount = 1;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      mt->msaa_layout = compute_msaa_layout(brw, format,
                                            mt->disable_aux_buffers);
      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
          * "If the surface is multisampled and it is a depth or stencil
          *  surface or Multisampled Surface StorageFormat in SURFACE_STATE is
          *  MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
          *  proceeding:
          *
          *  +----------------------------------------------------------------+
          *  | Num Multisamples |        W_l =         |        H_l =         |
          *  +----------------------------------------------------------------+
          *  |        2         | ceiling(W_l / 2) * 4 | H_l (no adjustment)  |
          *  |        4         | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
          *  |        8         | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
          *  |       16         | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
          *  +----------------------------------------------------------------+
          * "
          *
          * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
          * format rather than UMS/CMS (array slices). The Sandybridge PRM,
          * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
          *
          * Another more complicated explanation for these adjustments comes
          * from the Sandybridge PRM, volume 4, part 1, page 31:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2) (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3) (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2) (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3) (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0) (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0) (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2) (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2) (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
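         /* Worked example (illustrative): a 5x3 4x IMS surface becomes
          * ALIGN(5, 2) * 2 = 12 wide and ALIGN(3, 2) * 2 = 8 high.
          */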
         switch (num_samples) {
         case 2:
            assert(brw->gen >= 8);
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2);
            break;
         case 4:
            width0 = ALIGN(width0, 2) * 2;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 8:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 2;
            break;
         case 16:
            width0 = ALIGN(width0, 2) * 4;
            height0 = ALIGN(height0, 2) * 4;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 2, 4, 8
             * or 16.
             */
            unreachable("not reached");
         }
      } else {
         /* Non-interleaved */
         depth0 *= num_samples;
      }
   }

   /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
    * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on
    * Gen 7 and 8. On Gen 8 and 9 this layout is not available but it is still
    * used on Gen8 to make it pick a qpitch value which doesn't include space
    * for the mipmaps. On Gen9 this is not necessary because it will
    * automatically pick a packed qpitch value whenever mt->first_level ==
    * mt->last_level.
    * TODO: can we use it elsewhere?
    * TODO: also disable this on Gen8 and pick the qpitch value like Gen9
    */
   if (brw->gen >= 9) {
      mt->array_layout = ALL_LOD_IN_EACH_SLICE;
   } else {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         mt->array_layout = ALL_LOD_IN_EACH_SLICE;
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         mt->array_layout = ALL_SLICES_AT_EACH_LOD;
         break;
      }
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       (brw->must_use_separate_stencil ||
        (brw->has_separate_stencil &&
         intel_miptree_wants_hiz_buffer(brw, mt)))) {
      uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
      if (brw->gen == 6) {
         stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
                          MIPTREE_LAYOUT_TILING_ANY;
      }

      mt->stencil_mt = intel_miptree_create(brw,
                                            mt->target,
                                            MESA_FORMAT_S_UINT8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            num_samples,
                                            stencil_flags);

      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
       */
      mt->format = intel_depth_format_for_depthstencil_format(mt->format);
      mt->cpp = 4;

      if (format == mt->format) {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
      mt->array_layout = ALL_SLICES_AT_EACH_LOD;

   /*
    * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
    * multisampled or have an AUX buffer attached to them.
    *
    *  GEN  |    MSRT    | AUX_CCS_* or AUX_MCS
    *  -------------------------------------------
    *   9   | HALIGN_16  |      HALIGN_16
    *   8   | HALIGN_ANY |      HALIGN_16
    *   7   |     ?      |          ?
    *   6   |     ?      |          ?
    */
   if (intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
         layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else if (brw->gen >= 9 && num_samples > 1) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   } else {
      const UNUSED bool is_lossless_compressed_aux =
         brw->gen >= 9 && num_samples == 1 &&
         mt->format == MESA_FORMAT_R_UINT32;

      /* For now, nothing else has this requirement */
      assert(is_lossless_compressed_aux ||
             (layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
   }

   brw_miptree_layout(brw, mt, layout_flags);

   if (mt->disable_aux_buffers)
      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS);

   return mt;
}


/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms; they are supported
    * natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non ETC1 / ETC2 format */
      return format;
   }
}

/* This function computes Yf/Ys tiled bo size, alignment and pitch. */
static unsigned long
intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment,
                        unsigned long *pitch)
{
   uint32_t tile_width, tile_height;
   unsigned long stride, size, aligned_y;

   assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE);
   intel_get_tile_dims(mt->tiling, mt->tr_mode, mt->cpp,
                       &tile_width, &tile_height);

   aligned_y = ALIGN(mt->total_height, tile_height);
   stride = mt->total_width * mt->cpp;
   stride = ALIGN(stride, tile_width);
   size = stride * aligned_y;

   if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) {
      assert(size % 4096 == 0);
      *alignment = 4096;
   } else {
      assert(size % (64 * 1024) == 0);
      *alignment = 64 * 1024;
   }
   *pitch = stride;
   return size;
}
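
/* Illustrative numbers: with cpp = 4 and YF tiling, intel_get_tile_dims()
 * below yields a 128-byte x 32-row tile, so a 250px-wide, 100-row miptree
 * gets stride = ALIGN(1000, 128) = 1024, aligned_y = ALIGN(100, 32) = 128,
 * and size = 1024 * 128 = 131072 bytes, which is 4096-byte aligned.
 */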

static struct intel_mipmap_tree *
miptree_create(struct brw_context *brw,
               GLenum target,
               mesa_format format,
               GLuint first_level,
               GLuint last_level,
               GLuint width0,
               GLuint height0,
               GLuint depth0,
               GLuint num_samples,
               uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   uint32_t alloc_flags = 0;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
   mt = intel_miptree_create_layout(brw, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0, num_samples,
                                    layout_flags);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->tiling == (I915_TILING_Y | I915_TILING_X))
      mt->tiling = I915_TILING_Y;

   if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
      alloc_flags |= BO_ALLOC_FOR_RENDER;

   unsigned long pitch;
   mt->etc_format = etc_format;

   if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
      unsigned alignment = 0;
      unsigned long size;
      size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch);
      assert(size);
      mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree",
                                             size, alignment);
   } else {
      if (format == MESA_FORMAT_S_UINT8) {
         /* Align to size of W tile, 64x64. */
         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                           ALIGN(mt->total_width, 64),
                                           ALIGN(mt->total_height, 64),
                                           mt->cpp, &mt->tiling, &pitch,
                                           alloc_flags);
      } else {
         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                           mt->total_width, mt->total_height,
                                           mt->cpp, &mt->tiling, &pitch,
                                           alloc_flags);
      }
   }

   mt->pitch = pitch;

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     GLuint num_samples,
                     uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt = miptree_create(
      brw, target, format,
      first_level, last_level,
      width0, height0, depth0, num_samples,
      layout_flags);

   /* miptree_create() can fail (e.g. on allocation failure); bail before
    * dereferencing mt below.
    */
   if (!mt)
      return NULL;

   /* If the BO is too large to fit in the aperture, we need to use the
    * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
    * handle Y-tiling, so we need to fall back to X.
    */
   if (brw->gen < 6 && mt->bo->size >= brw->max_gtt_map_object_size &&
       mt->tiling == I915_TILING_Y) {
      unsigned long pitch = mt->pitch;
      const uint32_t alloc_flags =
         (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) ?
         BO_ALLOC_FOR_RENDER : 0;
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);

      mt->tiling = I915_TILING_X;
      drm_intel_bo_unreference(mt->bo);
      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
                                        mt->total_width, mt->total_height,
                                        mt->cpp, &mt->tiling, &pitch,
                                        alloc_flags);
      mt->pitch = pitch;
   }

   mt->offset = 0;

   if (!mt->bo) {
      intel_miptree_release(&mt);
      return NULL;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      assert(mt->num_samples > 1);
      if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   /* If this miptree is capable of supporting fast color clears, set
    * fast_clear_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs, or until a compressed single-sampled buffer is
    * written by the GPU for the first time.
    */
   if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
      assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1);
   }

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;
   GLenum target;

   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch. If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;

   /* The BO already has a tiling format and we shouldn't confuse the lower
    * layers by making it try to find a tiling format again.
    */
   assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0);

   layout_flags |= MIPTREE_LAYOUT_FOR_BO;
   mt = intel_miptree_create_layout(brw, target, format,
                                    0, 0,
                                    width, height, depth, 0,
                                    layout_flags);
   if (!mt)
      return NULL;

   drm_intel_bo_reference(bo);
   mt->bo = bo;
   mt->pitch = pitch;
   mt->offset = offset;
   mt->tiling = tiling;

   return mt;
}

/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         drm_intel_bo *bo,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   int num_samples = rb->NumSamples;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_bo(intel,
                                                 bo,
                                                 format,
                                                 0,
                                                 width,
                                                 height,
                                                 1,
                                                 pitch,
                                                 MIPTREE_LAYOUT_FOR_SCANOUT);
   if (!singlesample_mt)
      goto fail;

   /* If this miptree is capable of supporting fast color clears, set
    * mcs_state appropriately to ensure that fast clears will occur.
    * Allocation of the MCS miptree will be deferred until the first fast
    * clear actually occurs.
    */
   if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
       intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) {
      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
   }

   if (num_samples == 0) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->logical_width0 != width ||
          irb->mt->logical_height0 != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return;

fail:
   intel_miptree_release(&irb->singlesample_mt);
   intel_miptree_release(&irb->mt);
   return;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   bool ok;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
   const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                                 MIPTREE_LAYOUT_TILING_ANY |
                                 MIPTREE_LAYOUT_FOR_SCANOUT;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, num_samples,
                             layout_flags);
   if (!mt)
      goto fail;

   if (intel_miptree_wants_hiz_buffer(brw, mt)) {
      ok = intel_miptree_alloc_hiz(brw, mt);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      drm_intel_bo_unreference((*mt)->bo);
      intel_miptree_release(&(*mt)->stencil_mt);
      if ((*mt)->hiz_buf) {
         if ((*mt)->hiz_buf->mt)
            intel_miptree_release(&(*mt)->hiz_buf->mt);
         else
            drm_intel_bo_unreference((*mt)->hiz_buf->bo);
         free((*mt)->hiz_buf);
      }
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}


void
intel_get_image_dims(struct gl_texture_image *image,
                     int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      /* For a 1D Array texture the OpenGL API will treat the image height as
       * the number of array slices. For Intel hardware, we treat the 1D array
       * as a 2D Array with a height of 1. So, here we want to swap image
       * height and depth.
       */
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* chooses the texture object based on the target passed in,
    * and objects can't change targets over their lifetimes, so this should
    * be true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_get_image_dims(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   int level_depth = mt->level[level].depth;
   if (mt->num_samples > 1) {
      switch (mt->msaa_layout) {
      case INTEL_MSAA_LAYOUT_NONE:
      case INTEL_MSAA_LAYOUT_IMS:
         break;
      case INTEL_MSAA_LAYOUT_UMS:
      case INTEL_MSAA_LAYOUT_CMS:
         level_depth /= mt->num_samples;
         break;
      }
   }

   /* Test image dimensions against the base level image adjusted for
    * minification. This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != minify(mt->logical_width0, level - mt->first_level) ||
       height != minify(mt->logical_height0, level - mt->first_level) ||
       depth != level_depth) {
      return false;
   }

   if (image->NumSamples != mt->num_samples)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y, GLuint d)
{
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
       level, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}


/**
 * This function computes the tile_w (in bytes) and tile_h (in rows) of
 * different tiling patterns. If the BO is untiled, tile_w is set to cpp
 * and tile_h is set to 1.
 */
void
intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                    uint32_t *tile_w, uint32_t *tile_h)
{
   if (tr_mode == INTEL_MIPTREE_TRMODE_NONE) {
      switch (tiling) {
      case I915_TILING_X:
         *tile_w = 512;
         *tile_h = 8;
         break;
      case I915_TILING_Y:
         *tile_w = 128;
         *tile_h = 32;
         break;
      case I915_TILING_NONE:
         *tile_w = cpp;
         *tile_h = 1;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      uint32_t aspect_ratio = 1;
      assert(_mesa_is_pow_two(cpp));

      switch (cpp) {
      case 1:
         *tile_h = 64;
         break;
      case 2:
      case 4:
         *tile_h = 32;
         break;
      case 8:
      case 16:
         *tile_h = 16;
         break;
      default:
         unreachable("not reached");
      }

      if (cpp == 2 || cpp == 8)
         aspect_ratio = 2;

      if (tr_mode == INTEL_MIPTREE_TRMODE_YS)
         *tile_h *= 4;

      *tile_w = *tile_h * aspect_ratio * cpp;
   }
}
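
/* Illustrative numbers: for cpp = 4 with YF tiling the code above yields a
 * 128-byte x 32-row (4KB) tile, and with YS tiling a 512-byte x 128-row
 * (64KB) tile.
 */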

/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile. If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
                     bool map_stencil_as_y_tiled,
                     uint32_t *mask_x, uint32_t *mask_y)
{
   uint32_t tile_w_bytes, tile_h;
   if (map_stencil_as_y_tiled)
      tiling = I915_TILING_Y;

   intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h);

   *mask_x = tile_w_bytes / cpp - 1;
   *mask_y = tile_h - 1;
}
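
/* Illustrative numbers: a Y-tiled BO with cpp = 4 has 128-byte x 32-row
 * tiles, so *mask_x = 128 / 4 - 1 = 31 and *mask_y = 31; (x & ~31, y & ~31)
 * is then the tile-aligned origin for any coordinate.
 */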

/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate. For tiled BOs, caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y,
                                 bool map_stencil_as_y_tiled)
{
   int cpp = mt->cpp;
   uint32_t pitch = mt->pitch;
   uint32_t tiling = mt->tiling;

   if (map_stencil_as_y_tiled) {
      tiling = I915_TILING_Y;

      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
       * gets transformed into a 32-high Y-tile. Accordingly, the pitch of
       * the resulting surface is twice the pitch of the original miptree,
       * since each row in the Y-tiled view corresponds to two rows in the
       * actual W-tiled surface. So we need to correct the pitch before
       * computing the offsets.
       */
      pitch *= 2;
   }

   switch (tiling) {
   default:
      unreachable("not reached");
   case I915_TILING_NONE:
      return y * pitch + x * cpp;
   case I915_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
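
/* Illustrative numbers: for a Y-tiled surface with cpp = 4, the point
 * (x, y) = (64, 32) maps to 32 * pitch + 64 / 32 * 4096, i.e. one tile row
 * down and two 4KB tiles in.
 */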

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary. For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, false,
                        &mask_x, &mask_y);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
}
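
/* In other words: the returned byte offset is the tile-aligned base of the
 * image, and (*tile_x, *tile_y) is the remaining intra-tile offset that the
 * caller programs into the surface state's X/Y offset fields.
 */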

static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   ptrdiff_t src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(brw, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, level, slice);
   intel_miptree_unmap(brw, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too. We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}

static void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
   uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      unsigned int i, j;
      _mesa_get_format_block_size(dst_mt->format, &i, &j);
      height = ALIGN_NPOT(height, j) / j;
      width = ALIGN_NPOT(width, i) / i;
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;

   GLuint depth;
   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
      depth = intelImage->base.Base.Height;
   else
      depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

static void
intel_miptree_init_mcs(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       int init_value)
{
   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
   memset(data, init_value, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
   intel_miptree_unmap_raw(mt->mcs_mt);
   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
}

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* Choose the correct format for the MCS buffer. All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   mesa_format format;
   switch (num_samples) {
   case 2:
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R_UNORM8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   case 16:
      /* 64 bits/pixel are required for MCS data when using 16x MSAA (4 bits
       * for each sample).
       */
      format = MESA_FORMAT_RG_UINT32;
      break;
   default:
      unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                              MIPTREE_LAYOUT_TILING_Y;
   mt->mcs_mt = miptree_create(brw,
                               mt->target,
                               format,
                               mt->first_level,
                               mt->last_level,
                               mt->logical_width0,
                               mt->logical_height0,
                               mt->logical_depth0,
                               0 /* num_samples */,
                               mcs_flags);

   intel_miptree_init_mcs(brw, mt, 0xFF);

   return mt->mcs_mt;
}


bool
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_mt == NULL);
   assert(!mt->disable_aux_buffers);

   /* The format of the MCS buffer is opaque to the driver; all that matters
    * is that we get its size and pitch right. We'll pretend that the format
    * is R32. Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
    * R32 buffer is 32 pixels across, we'll need to scale the width down by
    * the block width and then a further factor of 4. Since an MCS tile
    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
    * we'll need to scale the height down by the block height and then a
    * further factor of 8.
    */
   const mesa_format format = MESA_FORMAT_R_UINT32;
   unsigned block_width_px;
   unsigned block_height;
   intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height);
   unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;
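
   /* Illustrative numbers: for a 32bpp (cpp = 4) Y-tiled surface the block
    * size from intel_get_non_msrt_mcs_alignment() is 8x4, so width_divisor =
    * 32 and height_divisor = 32; a 1920x1080 surface then gets a 60x34 MCS
    * miptree on Gen8.
    */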

   /* The Skylake MCS is twice as tall as the Broadwell MCS.
    *
    * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines
    * in the main surface. In Skylake, it's two bits. The extra bit
    * doubles the MCS height, not width, because in Skylake the MCS is always
    * Y-tiled.
    */
   if (brw->gen >= 9)
      height_divisor /= 2;

   unsigned mcs_width =
      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
   unsigned mcs_height =
      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
   assert(mt->logical_depth0 == 1);
   uint32_t layout_flags = MIPTREE_LAYOUT_TILING_Y;

   if (brw->gen >= 8) {
      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
   }

   /* On Gen9+ clients are not currently capable of consuming compressed
    * single-sampled buffers. Disabling compression allows us to skip
    * resolves.
    */
   const bool is_lossless_compressed =
      brw->gen >= 9 && !mt->is_scanout &&
      intel_miptree_supports_lossless_compressed(brw, mt);

   /* In the case of compression, the MCS buffer needs to be initialised,
    * which requires immediately mapping the buffer to CPU space for writing.
    * Therefore do not use the GPU-access flag, which can cause an unnecessary
    * delay if the backing pages happen to be in use by the GPU.
    */
   if (!is_lossless_compressed)
      layout_flags |= MIPTREE_LAYOUT_ACCELERATED_UPLOAD;

   mt->mcs_mt = miptree_create(brw,
                               mt->target,
                               format,
                               mt->first_level,
                               mt->last_level,
                               mcs_width,
                               mcs_height,
                               mt->logical_depth0,
                               0 /* num_samples */,
                               layout_flags);

   /* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
    * used for lossless compression which requires similar initialisation
    * as multi-sample compression.
    */
   if (is_lossless_compressed) {
      /* Hardware sets the auxiliary buffer to all zeroes when it does a full
       * resolve. Initialize it accordingly in case the first renderer is the
       * CPU (or some other non-compression-aware party).
       *
       * This is also explicitly stated in the spec (MCS Buffer for Render
       * Target(s)):
       *   "If Software wants to enable Color Compression without Fast clear,
       *    Software needs to initialize MCS with zeros."
       */
      intel_miptree_init_mcs(brw, mt, 0);
      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
      mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
   }

   return mt->mcs_mt;
}

void
intel_miptree_prepare_mcs(struct brw_context *brw,
                          struct intel_mipmap_tree *mt)
{
   if (mt->mcs_mt)
      return;

   if (brw->gen < 9)
      return;

   /* Single-sample compression is represented by re-using the MSAA
    * compression layout type: "Compressed Multisampled Surfaces".
    */
   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS || mt->num_samples > 1)
      return;

   /* Clients are not currently capable of consuming compressed
    * single-sampled buffers.
    */
   if (mt->is_scanout)
      return;

   assert(intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) ||
          intel_miptree_supports_lossless_compressed(brw, mt));

   /* Consider the case where lossless compression is supported but the
    * needed auxiliary buffer doesn't exist yet.
    *
    * Failing to allocate the auxiliary buffer means running out of memory.
    * The pointer to the aux miptree is left NULL, which signals
    * non-compressed behavior.
    */
   if (!intel_miptree_alloc_non_msrt_mcs(brw, mt)) {
      _mesa_warning(NULL,
                    "Failed to allocate aux buffer for lossless"
                    " compressed %p %u:%u %s\n",
                    mt, mt->logical_width0, mt->logical_height0,
                    _mesa_get_format_name(mt->format));
   }
}
1702
1703 /**
1704 * Helper for intel_miptree_alloc_hiz() that sets
1705 * \c mt->level[level].has_hiz. Return true if and only if
1706 * \c has_hiz was set.
1707 */
1708 static bool
1709 intel_miptree_level_enable_hiz(struct brw_context *brw,
1710 struct intel_mipmap_tree *mt,
1711 uint32_t level)
1712 {
1713 assert(mt->hiz_buf);
1714
1715 if (brw->gen >= 8 || brw->is_haswell) {
1716 uint32_t width = minify(mt->physical_width0, level);
1717 uint32_t height = minify(mt->physical_height0, level);
1718
1719 /* Disable HiZ for LOD > 0 unless the width is 8 aligned
1720 * and the height is 4 aligned. This allows our HiZ support
1721 * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
1722 * we can grow the width & height to allow the HiZ op to
1723 * force the proper size alignments.
1724 */
1725 if (level > 0 && ((width & 7) || (height & 3))) {
1726 DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
1727 return false;
1728 }
1729 }
1730
1731 DBG("mt %p level %d: HiZ enabled\n", mt, level);
1732 mt->level[level].has_hiz = true;
1733 return true;
1734 }
1735
1736
1737 /**
1738 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
1739 * buffer dimensions and allocates a bo for the hiz buffer.
1740 */
1741 static struct intel_miptree_aux_buffer *
1742 intel_gen7_hiz_buf_create(struct brw_context *brw,
1743 struct intel_mipmap_tree *mt)
1744 {
1745 unsigned z_width = mt->logical_width0;
1746 unsigned z_height = mt->logical_height0;
1747 const unsigned z_depth = MAX2(mt->logical_depth0, 1);
1748 unsigned hz_width, hz_height;
1749 struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
1750
1751 if (!buf)
1752 return NULL;
1753
1754 /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
1755 * adjustments required for Z_Height and Z_Width based on multisampling.
1756 */
1757 switch (mt->num_samples) {
1758 case 0:
1759 case 1:
1760 break;
1761 case 2:
1762 case 4:
1763 z_width *= 2;
1764 z_height *= 2;
1765 break;
1766 case 8:
1767 z_width *= 4;
1768 z_height *= 2;
1769 break;
1770 default:
1771 unreachable("unsupported sample count");
1772 }
1773
1774 const unsigned vertical_align = 8; /* 'j' in the docs */
1775 const unsigned H0 = z_height;
1776 const unsigned h0 = ALIGN(H0, vertical_align);
1777 const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
1778 const unsigned Z0 = z_depth;
1779
1780 /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
1781 hz_width = ALIGN(z_width, 16);
1782
1783 if (mt->target == GL_TEXTURE_3D) {
1784 unsigned H_i = H0;
1785 unsigned Z_i = Z0;
1786 hz_height = 0;
1787 for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
1788 unsigned h_i = ALIGN(H_i, vertical_align);
1789 /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1790 hz_height += h_i * Z_i;
1791 H_i = minify(H_i, 1);
1792 Z_i = minify(Z_i, 1);
1793 }
1794 /* HZ_Height =
1795 * (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
1796 */
1797 hz_height = DIV_ROUND_UP(hz_height, 2);
1798 } else {
1799 const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
1800 if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
1801 mt->target == GL_TEXTURE_CUBE_MAP) {
1802 /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
1803 hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
1804 } else {
1805 /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
1806 hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
1807 }
1808 }
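/* Worked example (illustrative, not from the PRM): a single-sampled
 * 2D 512x512 depth surface gives
 *    h0        = ALIGN(512, 8)               = 512
 *    h1        = ALIGN(256, 8)               = 256
 *    HZ_QPitch = 512 + 256 + 12 * 8          = 864
 *    HZ_Width  = ALIGN(512, 16)              = 512 bytes
 *    HZ_Height = ceil((864 * 1 / 2) / 8) * 8 = 432 rows
 */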
1809
1810 unsigned long pitch;
1811 uint32_t tiling = I915_TILING_Y;
1812 buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
1813 hz_width, hz_height, 1,
1814 &tiling, &pitch,
1815 BO_ALLOC_FOR_RENDER);
1816 if (!buf->bo) {
1817 free(buf);
1818 return NULL;
1819 } else if (tiling != I915_TILING_Y) {
1820 drm_intel_bo_unreference(buf->bo);
1821 free(buf);
1822 return NULL;
1823 }
1824
1825 buf->pitch = pitch;
1826
1827 return buf;
1828 }
1829
1830
1831 /**
1832 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
1833 * buffer dimensions and allocates a bo for the hiz buffer.
1834 */
1835 static struct intel_miptree_aux_buffer *
1836 intel_gen8_hiz_buf_create(struct brw_context *brw,
1837 struct intel_mipmap_tree *mt)
1838 {
1839 unsigned z_width = mt->logical_width0;
1840 unsigned z_height = mt->logical_height0;
1841 const unsigned z_depth = MAX2(mt->logical_depth0, 1);
1842 unsigned hz_width, hz_height;
1843 struct intel_miptree_aux_buffer *buf = calloc(1, sizeof(*buf));
1844
1845 if (!buf)
1846 return NULL;
1847
1848 /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
1849 * adjustments required for Z_Height and Z_Width based on multisampling.
1850 */
1851 if (brw->gen < 9) {
1852 switch (mt->num_samples) {
1853 case 0:
1854 case 1:
1855 break;
1856 case 2:
1857 case 4:
1858 z_width *= 2;
1859 z_height *= 2;
1860 break;
1861 case 8:
1862 z_width *= 4;
1863 z_height *= 2;
1864 break;
1865 default:
1866 unreachable("unsupported sample count");
1867 }
1868 }
1869
1870 const unsigned vertical_align = 8; /* 'j' in the docs */
1871 const unsigned H0 = z_height;
1872 const unsigned h0 = ALIGN(H0, vertical_align);
1873 const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
1874 const unsigned Z0 = z_depth;
1875
1876 /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
1877 hz_width = ALIGN(z_width, 16);
1878
1879 unsigned H_i = H0;
1880 unsigned Z_i = Z0;
1881 unsigned sum_h_i = 0;
1882 unsigned hz_height_3d_sum = 0;
1883 for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
1884 unsigned i = level - mt->first_level;
1885 unsigned h_i = ALIGN(H_i, vertical_align);
1886 /* sum(i=2 to m; h_i) */
1887 if (i >= 2) {
1888 sum_h_i += h_i;
1889 }
1890 /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1891 hz_height_3d_sum += h_i * Z_i;
1892 H_i = minify(H_i, 1);
1893 Z_i = minify(Z_i, 1);
1894 }
1895 /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
1896 buf->qpitch = h0 + MAX2(h1, sum_h_i);
1897
1898 if (mt->target == GL_TEXTURE_3D) {
1899 /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1900 hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
1901 } else {
1902 /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
1903 hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
1904 if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
1905 mt->target == GL_TEXTURE_CUBE_MAP) {
1906 /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
1907 *
1908 * We can just take our hz_height calculation from above and
1909 * multiply by 6 for the cube map and cube map array types.
1910 */
1911 hz_height *= 6;
1912 }
1913 }
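/* Worked example (illustrative): a single-sampled 2D 512x512 depth
 * surface with a full mip chain (m = 9) has
 *    h_i = 512, 256, 128, 64, 32, 16, 8, 8, 8, 8
 * so sum(i=2 to m; h_i) = 272, and therefore
 *    HZ_QPitch = 512 + MAX2(256, 272)        = 784
 *    HZ_Height = ceil((784 / 2) / 8) * 8 * 1 = 392 rows
 */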
1914
1915 unsigned long pitch;
1916 uint32_t tiling = I915_TILING_Y;
1917 buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
1918 hz_width, hz_height, 1,
1919 &tiling, &pitch,
1920 BO_ALLOC_FOR_RENDER);
1921 if (!buf->bo) {
1922 free(buf);
1923 return NULL;
1924 } else if (tiling != I915_TILING_Y) {
1925 drm_intel_bo_unreference(buf->bo);
1926 free(buf);
1927 return NULL;
1928 }
1929
1930 buf->pitch = pitch;
1931
1932 return buf;
1933 }
1934
1935
1936 static struct intel_miptree_aux_buffer *
1937 intel_hiz_miptree_buf_create(struct brw_context *brw,
1938 struct intel_mipmap_tree *mt)
1939 {
1940 struct intel_miptree_aux_buffer *buf = calloc(1, sizeof(*buf));
1941 uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
1942
1943 if (brw->gen == 6)
1944 layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;
1945
1946 if (!buf)
1947 return NULL;
1948
1949 layout_flags |= MIPTREE_LAYOUT_TILING_ANY;
1950 buf->mt = intel_miptree_create(brw,
1951 mt->target,
1952 mt->format,
1953 mt->first_level,
1954 mt->last_level,
1955 mt->logical_width0,
1956 mt->logical_height0,
1957 mt->logical_depth0,
1958 mt->num_samples,
1959 layout_flags);
1960 if (!buf->mt) {
1961 free(buf);
1962 return NULL;
1963 }
1964
1965 buf->bo = buf->mt->bo;
1966 buf->pitch = buf->mt->pitch;
1967 buf->qpitch = buf->mt->qpitch;
1968
1969 return buf;
1970 }
1971
1972 bool
1973 intel_miptree_wants_hiz_buffer(struct brw_context *brw,
1974 struct intel_mipmap_tree *mt)
1975 {
1976 if (!brw->has_hiz)
1977 return false;
1978
1979 if (mt->hiz_buf != NULL)
1980 return false;
1981
1982 if (mt->disable_aux_buffers)
1983 return false;
1984
1985 switch (mt->format) {
1986 case MESA_FORMAT_Z_FLOAT32:
1987 case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
1988 case MESA_FORMAT_Z24_UNORM_X8_UINT:
1989 case MESA_FORMAT_Z24_UNORM_S8_UINT:
1990 case MESA_FORMAT_Z_UNORM16:
1991 return true;
1992 default:
1993 return false;
1994 }
1995 }
1996
1997 bool
1998 intel_miptree_alloc_hiz(struct brw_context *brw,
1999 struct intel_mipmap_tree *mt)
2000 {
2001 assert(mt->hiz_buf == NULL);
2002 assert(!mt->disable_aux_buffers);
2003
2004 if (brw->gen == 7) {
2005 mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
2006 } else if (brw->gen >= 8) {
2007 mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
2008 } else {
2009 mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
2010 }
2011
2012 if (!mt->hiz_buf)
2013 return false;
2014
2015 /* Enable HiZ per level and mark all enabled slices as needing a HiZ resolve. */
2016 for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
2017 if (!intel_miptree_level_enable_hiz(brw, mt, level))
2018 continue;
2019
2020 for (unsigned layer = 0; layer < mt->level[level].depth; ++layer) {
2021 struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
2022 exec_node_init(&m->link);
2023 m->level = level;
2024 m->layer = layer;
2025 m->need = GEN6_HIZ_OP_HIZ_RESOLVE;
2026
2027 exec_list_push_tail(&mt->hiz_map, &m->link);
2028 }
2029 }
2030
2031 return true;
2032 }
2033
2034 /**
2035 * Does the given miptree level have HiZ enabled?
2036 */
2037 bool
2038 intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
2039 {
2040 intel_miptree_check_level_layer(mt, level, 0);
2041 return mt->level[level].has_hiz;
2042 }
2043
2044 void
2045 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
2046 uint32_t level,
2047 uint32_t layer)
2048 {
2049 if (!intel_miptree_level_has_hiz(mt, level))
2050 return;
2051
2052 intel_resolve_map_set(&mt->hiz_map,
2053 level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
2054 }
2055
2056
2057 void
2058 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
2059 uint32_t level,
2060 uint32_t layer)
2061 {
2062 if (!intel_miptree_level_has_hiz(mt, level))
2063 return;
2064
2065 intel_resolve_map_set(&mt->hiz_map,
2066 level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
2067 }
2068
2069 void
2070 intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
2071 uint32_t level)
2072 {
2073 uint32_t layer;
2074 uint32_t end_layer = mt->level[level].depth;
2075
2076 for (layer = 0; layer < end_layer; layer++) {
2077 intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
2078 }
2079 }
2080
2081 static bool
2082 intel_miptree_slice_resolve(struct brw_context *brw,
2083 struct intel_mipmap_tree *mt,
2084 uint32_t level,
2085 uint32_t layer,
2086 enum gen6_hiz_op need)
2087 {
2088 intel_miptree_check_level_layer(mt, level, layer);
2089
2090 struct intel_resolve_map *item =
2091 intel_resolve_map_get(&mt->hiz_map, level, layer);
2092
2093 if (!item || item->need != need)
2094 return false;
2095
2096 intel_hiz_exec(brw, mt, level, layer, need);
2097 intel_resolve_map_remove(item);
2098 return true;
2099 }
2100
2101 bool
2102 intel_miptree_slice_resolve_hiz(struct brw_context *brw,
2103 struct intel_mipmap_tree *mt,
2104 uint32_t level,
2105 uint32_t layer)
2106 {
2107 return intel_miptree_slice_resolve(brw, mt, level, layer,
2108 GEN6_HIZ_OP_HIZ_RESOLVE);
2109 }
2110
2111 bool
2112 intel_miptree_slice_resolve_depth(struct brw_context *brw,
2113 struct intel_mipmap_tree *mt,
2114 uint32_t level,
2115 uint32_t layer)
2116 {
2117 return intel_miptree_slice_resolve(brw, mt, level, layer,
2118 GEN6_HIZ_OP_DEPTH_RESOLVE);
2119 }
2120
2121 static bool
2122 intel_miptree_all_slices_resolve(struct brw_context *brw,
2123 struct intel_mipmap_tree *mt,
2124 enum gen6_hiz_op need)
2125 {
2126 bool did_resolve = false;
2127
2128 foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
2129 if (map->need != need)
2130 continue;
2131
2132 intel_hiz_exec(brw, mt, map->level, map->layer, need);
2133 intel_resolve_map_remove(map);
2134 did_resolve = true;
2135 }
2136
2137 return did_resolve;
2138 }
2139
2140 bool
2141 intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
2142 struct intel_mipmap_tree *mt)
2143 {
2144 return intel_miptree_all_slices_resolve(brw, mt,
2145 GEN6_HIZ_OP_HIZ_RESOLVE);
2146 }
2147
2148 bool
2149 intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
2150 struct intel_mipmap_tree *mt)
2151 {
2152 return intel_miptree_all_slices_resolve(brw, mt,
2153 GEN6_HIZ_OP_DEPTH_RESOLVE);
2154 }
2155
2156
2157 void
2158 intel_miptree_resolve_color(struct brw_context *brw,
2159 struct intel_mipmap_tree *mt,
2160 int flags)
2161 {
2162 /* From gen9 onwards there is a new compression scheme for single-sampled
2163 * surfaces called "lossless compressed". These surfaces don't always
2164 * need to be resolved.
2165 */
2166 if ((flags & INTEL_MIPTREE_IGNORE_CCS_E) &&
2167 intel_miptree_is_lossless_compressed(brw, mt))
2168 return;
2169
2170 switch (mt->fast_clear_state) {
2171 case INTEL_FAST_CLEAR_STATE_NO_MCS:
2172 case INTEL_FAST_CLEAR_STATE_RESOLVED:
2173 /* No resolve needed */
2174 break;
2175 case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
2176 case INTEL_FAST_CLEAR_STATE_CLEAR:
2177 /* Fast color clear resolves only make sense for non-MSAA buffers. */
2178 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE ||
2179 intel_miptree_is_lossless_compressed(brw, mt)) {
2180 brw_blorp_resolve_color(brw, mt);
2181 }
2182 break;
2183 }
2184 }
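/* Informal sketch of the fast clear states handled above: NO_MCS means
 * no auxiliary buffer exists, so nothing can require a resolve;
 * RESOLVED means any earlier fast clear has already been written back
 * to the main surface; CLEAR and UNRESOLVED mean the MCS may still
 * encode pixel values that become visible to ordinary reads only after
 * brw_blorp_resolve_color() runs.
 */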
2185
2186
2187 /**
2188 * Make it possible to share the BO backing the given miptree with another
2189 * process or another miptree.
2190 *
2191 * Fast color clears are unsafe with shared buffers, so we need to resolve and
2192 * then discard the MCS buffer, if present. We also set the fast_clear_state
2193 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
2194 * allocated in the future.
2195 */
2196 void
2197 intel_miptree_make_shareable(struct brw_context *brw,
2198 struct intel_mipmap_tree *mt)
2199 {
2200 /* MCS buffers are also used for multisample buffers, but we can't resolve
2201 * away a multisample MCS buffer because it's an integral part of how the
2202 * pixel data is stored. Fortunately this code path should never be
2203 * reached for multisample buffers.
2204 */
2205 assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
2206
2207 if (mt->mcs_mt) {
2208 intel_miptree_resolve_color(brw, mt, 0);
2209 intel_miptree_release(&mt->mcs_mt);
2210 mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
2211 }
2212 }
2213
2214
2215 /**
2216 * \brief Get pointer offset into stencil buffer.
2217 *
2218 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
2219 * must decode the tile's layout in software.
2220 *
2221 * See
2222 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
2223 * Format.
2224 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
2225 *
2226 * Even though the returned offset is always positive, the return type is
2227 * signed due to
2228 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
2229 * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
2230 */
2231 static intptr_t
2232 intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
2233 {
2234 uint32_t tile_size = 4096;
2235 uint32_t tile_width = 64;
2236 uint32_t tile_height = 64;
2237 uint32_t row_size = 64 * stride;
2238
2239 uint32_t tile_x = x / tile_width;
2240 uint32_t tile_y = y / tile_height;
2241
2242 /* The byte's address relative to the tile's base address. */
2243 uint32_t byte_x = x % tile_width;
2244 uint32_t byte_y = y % tile_height;
2245
2246 uintptr_t u = tile_y * row_size
2247 + tile_x * tile_size
2248 + 512 * (byte_x / 8)
2249 + 64 * (byte_y / 8)
2250 + 32 * ((byte_y / 4) % 2)
2251 + 16 * ((byte_x / 4) % 2)
2252 + 8 * ((byte_y / 2) % 2)
2253 + 4 * ((byte_x / 2) % 2)
2254 + 2 * (byte_y % 2)
2255 + 1 * (byte_x % 2);
2256
2257 if (swizzled) {
2258 /* adjust for bit6 swizzling */
2259 if (((byte_x / 8) % 2) == 1) {
2260 if (((byte_y / 8) % 2) == 0) {
2261 u += 64;
2262 } else {
2263 u -= 64;
2264 }
2265 }
2266 }
2267
2268 return u;
2269 }
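/* Worked example for intel_offset_S8() (illustrative, with
 * swizzled == false): for stride == 128 and (x, y) == (70, 3) we get
 * tile_x = 1, tile_y = 0, byte_x = 6, byte_y = 3, so
 *    u = 1 * 4096 + 16 * 1 + 8 * 1 + 4 * 1 + 2 * 1 = 4126,
 * i.e. the byte lives in the second tile of the top tile row.
 */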
2270
2271 void
2272 intel_miptree_updownsample(struct brw_context *brw,
2273 struct intel_mipmap_tree *src,
2274 struct intel_mipmap_tree *dst)
2275 {
2276 brw_blorp_blit_miptrees(brw,
2277 src, 0 /* level */, 0 /* layer */,
2278 src->format, SWIZZLE_XYZW,
2279 dst, 0 /* level */, 0 /* layer */, dst->format,
2280 0, 0,
2281 src->logical_width0, src->logical_height0,
2282 0, 0,
2283 dst->logical_width0, dst->logical_height0,
2284 GL_NEAREST, false, false /*mirror x, y*/,
2285 false, false);
2286
2287 if (src->stencil_mt) {
2288 brw_blorp_blit_miptrees(brw,
2289 src->stencil_mt, 0 /* level */, 0 /* layer */,
2290 src->stencil_mt->format, SWIZZLE_XYZW,
2291 dst->stencil_mt, 0 /* level */, 0 /* layer */,
2292 dst->stencil_mt->format,
2293 0, 0,
2294 src->logical_width0, src->logical_height0,
2295 0, 0,
2296 dst->logical_width0, dst->logical_height0,
2297 GL_NEAREST, false, false /*mirror x, y*/,
2298 false, false /* decode/encode srgb */);
2299 }
2300 }
2301
2302 static void *
2303 intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
2304 {
2305 /* CPU accesses to color buffers don't understand fast color clears, so
2306 * resolve any pending fast color clears before we map.
2307 */
2308 intel_miptree_resolve_color(brw, mt, 0);
2309
2310 drm_intel_bo *bo = mt->bo;
2311
2312 if (drm_intel_bo_references(brw->batch.bo, bo))
2313 intel_batchbuffer_flush(brw);
2314
2315 if (mt->tiling != I915_TILING_NONE)
2316 brw_bo_map_gtt(brw, bo, "miptree");
2317 else
2318 brw_bo_map(brw, bo, true, "miptree");
2319
2320 return bo->virtual;
2321 }
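/* Design note: tiled BOs are mapped through the GTT above so that the
 * hardware detiles them for us, at the cost of uncached
 * (write-combined) access. Avoiding that slow uncached readback for
 * read-only maps is what the SSE4.1 MOVNTDQA path further below is
 * for.
 */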
2322
2323 static void
2324 intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
2325 {
2326 drm_intel_bo_unmap(mt->bo);
2327 }
2328
2329 static void
2330 intel_miptree_map_gtt(struct brw_context *brw,
2331 struct intel_mipmap_tree *mt,
2332 struct intel_miptree_map *map,
2333 unsigned int level, unsigned int slice)
2334 {
2335 unsigned int bw, bh;
2336 void *base;
2337 unsigned int image_x, image_y;
2338 intptr_t x = map->x;
2339 intptr_t y = map->y;
2340
2341 /* For compressed formats, the stride is the number of bytes per
2342 * row of blocks. intel_miptree_get_image_offset() already does
2343 * the divide.
2344 */
2345 _mesa_get_format_block_size(mt->format, &bw, &bh);
2346 assert(y % bh == 0);
2347 assert(x % bw == 0);
2348 y /= bh;
2349 x /= bw;
2350
2351 base = intel_miptree_map_raw(brw, mt);
2352
2353 if (base == NULL)
2354 map->ptr = NULL;
2355 else {
2356 /* Note that in the case of cube maps, the caller must have passed the
2357 * slice number referencing the face.
2358 */
2359 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2360 x += image_x;
2361 y += image_y;
2362
2363 map->stride = mt->pitch;
2364 map->ptr = base + mt->offset + y * map->stride + x * mt->cpp;
2365 }
2366
2367 DBG("%s: %d,%d %dx%d from mt %p (%s) "
2368 "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__,
2369 map->x, map->y, map->w, map->h,
2370 mt, _mesa_get_format_name(mt->format),
2371 x, y, map->ptr, map->stride);
2372 }
2373
2374 static void
2375 intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
2376 {
2377 intel_miptree_unmap_raw(mt);
2378 }
2379
2380 static void
2381 intel_miptree_map_blit(struct brw_context *brw,
2382 struct intel_mipmap_tree *mt,
2383 struct intel_miptree_map *map,
2384 unsigned int level, unsigned int slice)
2385 {
2386 map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
2387 /* first_level */ 0,
2388 /* last_level */ 0,
2389 map->w, map->h, 1,
2390 /* samples */ 0,
2391 MIPTREE_LAYOUT_TILING_NONE);
2392
2393 if (!map->linear_mt) {
2394 fprintf(stderr, "Failed to allocate blit temporary\n");
2395 goto fail;
2396 }
2397 map->stride = map->linear_mt->pitch;
2398
2399 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
2400 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2401 * invalidate is set, since we'll be writing the whole rectangle from our
2402 * temporary buffer back out.
2403 */
2404 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2405 if (!intel_miptree_blit(brw,
2406 mt, level, slice,
2407 map->x, map->y, false,
2408 map->linear_mt, 0, 0,
2409 0, 0, false,
2410 map->w, map->h, GL_COPY)) {
2411 fprintf(stderr, "Failed to blit\n");
2412 goto fail;
2413 }
2414 }
2415
2416 map->ptr = intel_miptree_map_raw(brw, map->linear_mt);
2417
2418 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
2419 map->x, map->y, map->w, map->h,
2420 mt, _mesa_get_format_name(mt->format),
2421 level, slice, map->ptr, map->stride);
2422
2423 return;
2424
2425 fail:
2426 intel_miptree_release(&map->linear_mt);
2427 map->ptr = NULL;
2428 map->stride = 0;
2429 }
2430
2431 static void
2432 intel_miptree_unmap_blit(struct brw_context *brw,
2433 struct intel_mipmap_tree *mt,
2434 struct intel_miptree_map *map,
2435 unsigned int level,
2436 unsigned int slice)
2437 {
2438 struct gl_context *ctx = &brw->ctx;
2439
2440 intel_miptree_unmap_raw(map->linear_mt);
2441
2442 if (map->mode & GL_MAP_WRITE_BIT) {
2443 bool ok = intel_miptree_blit(brw,
2444 map->linear_mt, 0, 0,
2445 0, 0, false,
2446 mt, level, slice,
2447 map->x, map->y, false,
2448 map->w, map->h, GL_COPY);
2449 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
2450 }
2451
2452 intel_miptree_release(&map->linear_mt);
2453 }
2454
2455 /**
2456 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
2457 */
2458 #if defined(USE_SSE41)
2459 static void
2460 intel_miptree_map_movntdqa(struct brw_context *brw,
2461 struct intel_mipmap_tree *mt,
2462 struct intel_miptree_map *map,
2463 unsigned int level, unsigned int slice)
2464 {
2465 assert(map->mode & GL_MAP_READ_BIT);
2466 assert(!(map->mode & GL_MAP_WRITE_BIT));
2467
2468 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
2469 map->x, map->y, map->w, map->h,
2470 mt, _mesa_get_format_name(mt->format),
2471 level, slice, map->ptr, map->stride);
2472
2473 /* Map the original image */
2474 uint32_t image_x;
2475 uint32_t image_y;
2476 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2477 image_x += map->x;
2478 image_y += map->y;
2479
2480 void *src = intel_miptree_map_raw(brw, mt);
2481 if (!src)
2482 return;
2483 src += image_y * mt->pitch;
2484 src += image_x * mt->cpp;
2485
2486 /* Due to the pixel offsets for the particular image being mapped, our
2487 * src pointer may not be 16-byte aligned. However, if the pitch is
2488 * divisible by 16, then the amount by which it's misaligned will remain
2489 * consistent from row to row.
2490 */
2491 assert((mt->pitch % 16) == 0);
2492 const int misalignment = ((uintptr_t) src) & 15;
2493
2494 /* Create an untiled temporary buffer for the mapping. */
2495 const unsigned width_bytes = _mesa_format_row_stride(mt->format, map->w);
2496
2497 map->stride = ALIGN(misalignment + width_bytes, 16);
2498
2499 map->buffer = _mesa_align_malloc(map->stride * map->h, 16);
if (map->buffer == NULL) {
   intel_miptree_unmap_raw(mt);
   return;
}
2500 /* Offset the destination so it has the same misalignment as src. */
2501 map->ptr = map->buffer + misalignment;
2502
2503 assert((((uintptr_t) map->ptr) & 15) == misalignment);
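/* Example (illustrative): if the row's byte offset image_x * mt->cpp
 * is 40, then src sits 8 bytes past a 16-byte boundary
 * (40 & 15 == 8). Because the pitch is a multiple of 16 the same
 * offset recurs on every row, so giving map->ptr the same
 * misalignment keeps src and dst congruent for the streaming copy
 * below.
 */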
2504
2505 for (uint32_t y = 0; y < map->h; y++) {
2506 void *dst_ptr = map->ptr + y * map->stride;
2507 void *src_ptr = src + y * mt->pitch;
2508
2509 _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes);
2510 }
2511
2512 intel_miptree_unmap_raw(mt);
2513 }
2514
2515 static void
2516 intel_miptree_unmap_movntdqa(struct brw_context *brw,
2517 struct intel_mipmap_tree *mt,
2518 struct intel_miptree_map *map,
2519 unsigned int level,
2520 unsigned int slice)
2521 {
2522 _mesa_align_free(map->buffer);
2523 map->buffer = NULL;
2524 map->ptr = NULL;
2525 }
2526 #endif
2527
2528 static void
2529 intel_miptree_map_s8(struct brw_context *brw,
2530 struct intel_mipmap_tree *mt,
2531 struct intel_miptree_map *map,
2532 unsigned int level, unsigned int slice)
2533 {
2534 map->stride = map->w;
2535 map->buffer = map->ptr = malloc(map->stride * map->h);
2536 if (!map->buffer)
2537 return;
2538
2539 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
2540 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2541 * invalidate is set, since we'll be writing the whole rectangle from our
2542 * temporary buffer back out.
2543 */
2544 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2545 uint8_t *untiled_s8_map = map->ptr;
2546 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
2547 unsigned int image_x, image_y;
2548
2549 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2550
2551 for (uint32_t y = 0; y < map->h; y++) {
2552 for (uint32_t x = 0; x < map->w; x++) {
2553 ptrdiff_t offset = intel_offset_S8(mt->pitch,
2554 x + image_x + map->x,
2555 y + image_y + map->y,
2556 brw->has_swizzling);
2557 untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
2558 }
2559 }
2560
2561 intel_miptree_unmap_raw(mt);
2562
2563 DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__,
2564 map->x, map->y, map->w, map->h,
2565 mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
2566 } else {
2567 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
2568 map->x, map->y, map->w, map->h,
2569 mt, map->ptr, map->stride);
2570 }
2571 }
2572
2573 static void
2574 intel_miptree_unmap_s8(struct brw_context *brw,
2575 struct intel_mipmap_tree *mt,
2576 struct intel_miptree_map *map,
2577 unsigned int level,
2578 unsigned int slice)
2579 {
2580 if (map->mode & GL_MAP_WRITE_BIT) {
2581 unsigned int image_x, image_y;
2582 uint8_t *untiled_s8_map = map->ptr;
2583 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
2584
2585 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2586
2587 for (uint32_t y = 0; y < map->h; y++) {
2588 for (uint32_t x = 0; x < map->w; x++) {
2589 ptrdiff_t offset = intel_offset_S8(mt->pitch,
2590 image_x + x + map->x,
2591 image_y + y + map->y,
2592 brw->has_swizzling);
2593 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
2594 }
2595 }
2596
2597 intel_miptree_unmap_raw(mt);
2598 }
2599
2600 free(map->buffer);
2601 }
2602
2603 static void
2604 intel_miptree_map_etc(struct brw_context *brw,
2605 struct intel_mipmap_tree *mt,
2606 struct intel_miptree_map *map,
2607 unsigned int level,
2608 unsigned int slice)
2609 {
2610 assert(mt->etc_format != MESA_FORMAT_NONE);
2611 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
2612 assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
2613 }
2614
2615 assert(map->mode & GL_MAP_WRITE_BIT);
2616 assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
2617
2618 map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
2619 map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
2620 map->w, map->h, 1));
2621 map->ptr = map->buffer;
2622 }
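/* Note: the staging buffer above receives ETC-compressed texel data
 * exactly as the application uploads it; decompression into the
 * miptree's linear format happens at unmap time, below. As an
 * illustrative size check, a 64x64 ETC2 RGB8 level occupies
 * (64/4) * (64/4) * 8 == 2048 bytes of staging memory.
 */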
2623
2624 static void
2625 intel_miptree_unmap_etc(struct brw_context *brw,
2626 struct intel_mipmap_tree *mt,
2627 struct intel_miptree_map *map,
2628 unsigned int level,
2629 unsigned int slice)
2630 {
2631 uint32_t image_x;
2632 uint32_t image_y;
2633 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2634
2635 image_x += map->x;
2636 image_y += map->y;
2637
2638 uint8_t *dst = intel_miptree_map_raw(brw, mt)
2639 + image_y * mt->pitch
2640 + image_x * mt->cpp;
2641
2642 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
2643 _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
2644 map->ptr, map->stride,
2645 map->w, map->h);
2646 else
2647 _mesa_unpack_etc2_format(dst, mt->pitch,
2648 map->ptr, map->stride,
2649 map->w, map->h, mt->etc_format);
2650
2651 intel_miptree_unmap_raw(mt);
2652 free(map->buffer);
2653 }
2654
2655 /**
2656 * Mapping function for packed depth/stencil miptrees backed by real separate
2657 * miptrees for depth and stencil.
2658 *
2659 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
2660 * separate from the depth buffer. Yet at the GL API level, we have to expose
2661 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
2662 * be able to map that memory for texture storage and glReadPixels-type
2663 * operations. We give Mesa core that access by mallocing a temporary and
2664 * copying the data between the actual backing store and the temporary.
2665 */
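/* For example (illustrative): with MESA_FORMAT_Z24_UNORM_S8_UINT the
 * temporary holds one uint32_t per pixel with stencil in the top byte,
 *    packed = (s << 24) | (z & 0x00ffffff);
 * whereas MESA_FORMAT_Z_FLOAT32 plus separate S8 packs two uint32_ts
 * per pixel: the raw z32f bits followed by the stencil value
 * zero-extended to 32 bits.
 */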
2666 static void
2667 intel_miptree_map_depthstencil(struct brw_context *brw,
2668 struct intel_mipmap_tree *mt,
2669 struct intel_miptree_map *map,
2670 unsigned int level, unsigned int slice)
2671 {
2672 struct intel_mipmap_tree *z_mt = mt;
2673 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
2674 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
2675 int packed_bpp = map_z32f_x24s8 ? 8 : 4;
2676
2677 map->stride = map->w * packed_bpp;
2678 map->buffer = map->ptr = malloc(map->stride * map->h);
2679 if (!map->buffer)
2680 return;
2681
2682 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
2683 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2684 * invalidate is set, since we'll be writing the whole rectangle from our
2685 * temporary buffer back out.
2686 */
2687 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2688 uint32_t *packed_map = map->ptr;
2689 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
2690 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
2691 unsigned int s_image_x, s_image_y;
2692 unsigned int z_image_x, z_image_y;
2693
2694 intel_miptree_get_image_offset(s_mt, level, slice,
2695 &s_image_x, &s_image_y);
2696 intel_miptree_get_image_offset(z_mt, level, slice,
2697 &z_image_x, &z_image_y);
2698
2699 for (uint32_t y = 0; y < map->h; y++) {
2700 for (uint32_t x = 0; x < map->w; x++) {
2701 int map_x = map->x + x, map_y = map->y + y;
2702 ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
2703 map_x + s_image_x,
2704 map_y + s_image_y,
2705 brw->has_swizzling);
2706 ptrdiff_t z_offset = ((map_y + z_image_y) *
2707 (z_mt->pitch / 4) +
2708 (map_x + z_image_x));
2709 uint8_t s = s_map[s_offset];
2710 uint32_t z = z_map[z_offset];
2711
2712 if (map_z32f_x24s8) {
2713 packed_map[(y * map->w + x) * 2 + 0] = z;
2714 packed_map[(y * map->w + x) * 2 + 1] = s;
2715 } else {
2716 packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
2717 }
2718 }
2719 }
2720
2721 intel_miptree_unmap_raw(s_mt);
2722 intel_miptree_unmap_raw(z_mt);
2723
2724 DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
2725 __func__,
2726 map->x, map->y, map->w, map->h,
2727 z_mt, map->x + z_image_x, map->y + z_image_y,
2728 s_mt, map->x + s_image_x, map->y + s_image_y,
2729 map->ptr, map->stride);
2730 } else {
2731 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
2732 map->x, map->y, map->w, map->h,
2733 mt, map->ptr, map->stride);
2734 }
2735 }
2736
2737 static void
2738 intel_miptree_unmap_depthstencil(struct brw_context *brw,
2739 struct intel_mipmap_tree *mt,
2740 struct intel_miptree_map *map,
2741 unsigned int level,
2742 unsigned int slice)
2743 {
2744 struct intel_mipmap_tree *z_mt = mt;
2745 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
2746 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
2747
2748 if (map->mode & GL_MAP_WRITE_BIT) {
2749 uint32_t *packed_map = map->ptr;
2750 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
2751 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
2752 unsigned int s_image_x, s_image_y;
2753 unsigned int z_image_x, z_image_y;
2754
2755 intel_miptree_get_image_offset(s_mt, level, slice,
2756 &s_image_x, &s_image_y);
2757 intel_miptree_get_image_offset(z_mt, level, slice,
2758 &z_image_x, &z_image_y);
2759
2760 for (uint32_t y = 0; y < map->h; y++) {
2761 for (uint32_t x = 0; x < map->w; x++) {
2762 ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
2763 x + s_image_x + map->x,
2764 y + s_image_y + map->y,
2765 brw->has_swizzling);
2766 ptrdiff_t z_offset = ((y + z_image_y + map->y) *
2767 (z_mt->pitch / 4) +
2768 (x + z_image_x + map->x));
2769
2770 if (map_z32f_x24s8) {
2771 z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
2772 s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
2773 } else {
2774 uint32_t packed = packed_map[y * map->w + x];
2775 s_map[s_offset] = packed >> 24;
2776 z_map[z_offset] = packed;
2777 }
2778 }
2779 }
2780
2781 intel_miptree_unmap_raw(s_mt);
2782 intel_miptree_unmap_raw(z_mt);
2783
2784 DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
2785 __func__,
2786 map->x, map->y, map->w, map->h,
2787 z_mt, _mesa_get_format_name(z_mt->format),
2788 map->x + z_image_x, map->y + z_image_y,
2789 s_mt, map->x + s_image_x, map->y + s_image_y,
2790 map->ptr, map->stride);
2791 }
2792
2793 free(map->buffer);
2794 }
2795
2796 /**
2797 * Create and attach a map to the miptree at (level, slice). Return the
2798 * attached map.
2799 */
2800 static struct intel_miptree_map*
2801 intel_miptree_attach_map(struct intel_mipmap_tree *mt,
2802 unsigned int level,
2803 unsigned int slice,
2804 unsigned int x,
2805 unsigned int y,
2806 unsigned int w,
2807 unsigned int h,
2808 GLbitfield mode)
2809 {
2810 struct intel_miptree_map *map = calloc(1, sizeof(*map));
2811
2812 if (!map)
2813 return NULL;
2814
2815 assert(mt->level[level].slice[slice].map == NULL);
2816 mt->level[level].slice[slice].map = map;
2817
2818 map->mode = mode;
2819 map->x = x;
2820 map->y = y;
2821 map->w = w;
2822 map->h = h;
2823
2824 return map;
2825 }
2826
2827 /**
2828 * Release the map at (level, slice).
2829 */
2830 static void
2831 intel_miptree_release_map(struct intel_mipmap_tree *mt,
2832 unsigned int level,
2833 unsigned int slice)
2834 {
2835 struct intel_miptree_map **map;
2836
2837 map = &mt->level[level].slice[slice].map;
2838 free(*map);
2839 *map = NULL;
2840 }
2841
2842 static bool
2843 can_blit_slice(struct intel_mipmap_tree *mt,
2844 unsigned int level, unsigned int slice)
2845 {
2846 uint32_t image_x;
2847 uint32_t image_y;
2848 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2849 if (image_x >= 32768 || image_y >= 32768)
2850 return false;
2851
2852 /* See intel_miptree_blit() for details on the 32k pitch limit. */
2853 if (mt->pitch >= 32768)
2854 return false;
2855
2856 return true;
2857 }
2858
2859 static bool
2860 use_intel_miptree_map_blit(struct brw_context *brw,
2861 struct intel_mipmap_tree *mt,
2862 GLbitfield mode,
2863 unsigned int level,
2864 unsigned int slice)
2865 {
2866 if (brw->has_llc &&
2867 /* It's probably not worth swapping to the blit ring because of
2868 * all the overhead involved. But we must use the blitter for
2869 * surfaces with INTEL_MIPTREE_TRMODE_{YF,YS}.
2870 */
2871 (!(mode & GL_MAP_WRITE_BIT) ||
2872 mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) &&
2873 !mt->compressed &&
2874 (mt->tiling == I915_TILING_X ||
2875 /* Prior to Sandybridge, the blitter can't handle Y tiling */
2876 (brw->gen >= 6 && mt->tiling == I915_TILING_Y) ||
2877 /* Fast copy blit on skl+ supports all tiling formats. */
2878 brw->gen >= 9) &&
2879 can_blit_slice(mt, level, slice))
2880 return true;
2881
2882 if (mt->tiling != I915_TILING_NONE &&
2883 mt->bo->size >= brw->max_gtt_map_object_size) {
2884 assert(can_blit_slice(mt, level, slice));
2885 return true;
2886 }
2887
2888 return false;
2889 }
2890
2891 /**
2892 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
2893 * exceed 32 bits but to diminish the likelihood of subtle bugs caused
2894 * by 32-bit overflow in pointer arithmetic.
2895 *
2896 * If you call this function and use \a out_stride, then you're doing pointer
2897 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
2898 * bugs. The caller must still take care to avoid 32-bit overflow errors in
2899 * all arithmetic expressions that contain buffer offsets and pixel sizes,
2900 * which usually have type uint32_t or GLuint.
2901 */
2902 void
2903 intel_miptree_map(struct brw_context *brw,
2904 struct intel_mipmap_tree *mt,
2905 unsigned int level,
2906 unsigned int slice,
2907 unsigned int x,
2908 unsigned int y,
2909 unsigned int w,
2910 unsigned int h,
2911 GLbitfield mode,
2912 void **out_ptr,
2913 ptrdiff_t *out_stride)
2914 {
2915 struct intel_miptree_map *map;
2916
2917 assert(mt->num_samples <= 1);
2918
2919 map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
2920 if (!map) {
2921 *out_ptr = NULL;
2922 *out_stride = 0;
2923 return;
2924 }
2925
2926 intel_miptree_slice_resolve_depth(brw, mt, level, slice);
2927 if (map->mode & GL_MAP_WRITE_BIT) {
2928 intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
2929 }
2930
2931 if (mt->format == MESA_FORMAT_S_UINT8) {
2932 intel_miptree_map_s8(brw, mt, map, level, slice);
2933 } else if (mt->etc_format != MESA_FORMAT_NONE &&
2934 !(mode & BRW_MAP_DIRECT_BIT)) {
2935 intel_miptree_map_etc(brw, mt, map, level, slice);
2936 } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
2937 intel_miptree_map_depthstencil(brw, mt, map, level, slice);
2938 } else if (use_intel_miptree_map_blit(brw, mt, mode, level, slice)) {
2939 intel_miptree_map_blit(brw, mt, map, level, slice);
2940 #if defined(USE_SSE41)
2941 } else if (!(mode & GL_MAP_WRITE_BIT) &&
2942 !mt->compressed && cpu_has_sse4_1 &&
2943 (mt->pitch % 16 == 0)) {
2944 intel_miptree_map_movntdqa(brw, mt, map, level, slice);
2945 #endif
2946 } else {
2947 /* intel_miptree_map_gtt() doesn't support surfaces with Yf/Ys tiling. */
2948 assert(mt->tr_mode == INTEL_MIPTREE_TRMODE_NONE);
2949 intel_miptree_map_gtt(brw, mt, map, level, slice);
2950 }
2951
2952 *out_ptr = map->ptr;
2953 *out_stride = map->stride;
2954
2955 if (map->ptr == NULL)
2956 intel_miptree_release_map(mt, level, slice);
2957 }
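/* Typical usage sketch (hypothetical caller, for illustration only;
 * dst, dst_stride and row_bytes are the caller's own storage):
 *
 *    void *ptr;
 *    ptrdiff_t stride;
 *
 *    intel_miptree_map(brw, mt, level, slice, x, y, w, h,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    if (ptr) {
 *       for (unsigned row = 0; row < h; row++)
 *          memcpy(dst + row * dst_stride,
 *                 (char *) ptr + row * stride, row_bytes);
 *       intel_miptree_unmap(brw, mt, level, slice);
 *    }
 */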
2958
2959 void
2960 intel_miptree_unmap(struct brw_context *brw,
2961 struct intel_mipmap_tree *mt,
2962 unsigned int level,
2963 unsigned int slice)
2964 {
2965 struct intel_miptree_map *map = mt->level[level].slice[slice].map;
2966
2967 assert(mt->num_samples <= 1);
2968
2969 if (!map)
2970 return;
2971
2972 DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
2973 mt, _mesa_get_format_name(mt->format), level, slice);
2974
2975 if (mt->format == MESA_FORMAT_S_UINT8) {
2976 intel_miptree_unmap_s8(brw, mt, map, level, slice);
2977 } else if (mt->etc_format != MESA_FORMAT_NONE &&
2978 !(map->mode & BRW_MAP_DIRECT_BIT)) {
2979 intel_miptree_unmap_etc(brw, mt, map, level, slice);
2980 } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
2981 intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
2982 } else if (map->linear_mt) {
2983 intel_miptree_unmap_blit(brw, mt, map, level, slice);
2984 #if defined(USE_SSE41)
2985 } else if (map->buffer && cpu_has_sse4_1) {
2986 intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
2987 #endif
2988 } else {
2989 intel_miptree_unmap_gtt(mt);
2990 }
2991
2992 intel_miptree_release_map(mt, level, slice);
2993 }