mesa.git: src/mesa/drivers/dri/intel/intel_mipmap_tree.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <GL/gl.h>
29 #include <GL/internal/dri_interface.h>
30
31 #include "intel_batchbuffer.h"
32 #include "intel_chipset.h"
33 #include "intel_context.h"
34 #include "intel_mipmap_tree.h"
35 #include "intel_regions.h"
36 #include "intel_resolve_map.h"
37 #include "intel_tex_layout.h"
38 #include "intel_tex.h"
39 #include "intel_blit.h"
40
41 #ifndef I915
42 #include "brw_blorp.h"
43 #endif
44
45 #include "main/enums.h"
46 #include "main/formats.h"
47 #include "main/glformats.h"
48 #include "main/texcompress_etc.h"
49 #include "main/teximage.h"
50
51 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
52
53 static GLenum
54 target_to_target(GLenum target)
55 {
56 switch (target) {
57 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
58 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
59 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
60 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
61 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
62 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
63 return GL_TEXTURE_CUBE_MAP_ARB;
64 default:
65 return target;
66 }
67 }
68
69
70 /**
71 * Determine which MSAA layout should be used by the MSAA surface being
72 * created, based on the chip generation and the surface type.
73 */
74 static enum intel_msaa_layout
75 compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
76 {
77 /* Prior to Gen7, all MSAA surfaces used IMS layout. */
78 if (intel->gen < 7)
79 return INTEL_MSAA_LAYOUT_IMS;
80
81 /* In Gen7, IMS layout is only used for depth and stencil buffers. */
82 switch (_mesa_get_format_base_format(format)) {
83 case GL_DEPTH_COMPONENT:
84 case GL_STENCIL_INDEX:
85 case GL_DEPTH_STENCIL:
86 return INTEL_MSAA_LAYOUT_IMS;
87 default:
88 /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
89 *
90 * This field must be set to 0 for all SINT MSRTs when all RT channels
91 * are not written
92 *
93 * In practice this means that we have to disable MCS for all signed
94 * integer MSAA buffers. The alternative, to disable MCS only when one
95 * of the render target channels is disabled, is impractical because it
96 * would require converting between CMS and UMS MSAA layouts on the fly,
97 * which is expensive.
98 */
99 if (_mesa_get_format_datatype(format) == GL_INT) {
100 /* TODO: is this workaround needed for future chipsets? */
101 assert(intel->gen == 7);
102 return INTEL_MSAA_LAYOUT_UMS;
103 } else {
104 /* For now, if we're going to be texturing from this surface,
105 * force UMS, so that the shader doesn't have to do different things
106 * based on whether there's a multisample control surface that needs to be sampled first.
107 * We can't just blindly read the MCS surface in all cases because:
108 *
109 * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
110 *
111 * If this field is disabled and the sampling engine <ld_mcs> message
112 * is issued on this surface, the MCS surface may be accessed. Software
113 * must ensure that the surface is defined to avoid GTT errors.
114 */
115 if (target == GL_TEXTURE_2D_MULTISAMPLE ||
116 target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
117 return INTEL_MSAA_LAYOUT_UMS;
118 } else {
119 return INTEL_MSAA_LAYOUT_CMS;
120 }
121 }
122 }
123 }
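
/* Illustration of the resulting policy on Gen7, derived from the switch
 * above: depth/stencil surfaces -> IMS; signed-integer color -> UMS;
 * other color sampled via GL_TEXTURE_2D_MULTISAMPLE[_ARRAY] -> UMS;
 * remaining color render targets -> CMS.
 */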
124
125
126 /**
127 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
128 * scaled-down bitfield representation of the color buffer which is capable of
129 * recording when blocks of the color buffer are equal to the clear value.
130 * This function returns the block size that will be used by the MCS buffer
131 * corresponding to a certain color miptree.
132 *
133 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
134 * beneath the "Fast Color Clear" bullet (p327):
135 *
136 * The following table describes the RT alignment
137 *
138 * Pixels Lines
139 * TiledY RT CL
140 * bpp
141 * 32 8 4
142 * 64 4 4
143 * 128 2 4
144 * TiledX RT CL
145 * bpp
146 * 32 16 2
147 * 64 8 2
148 * 128 4 2
149 *
150 * This alignment has the following uses:
151 *
152 * - For figuring out the size of the MCS buffer. Each 4k tile in the MCS
153 * buffer contains 128 blocks horizontally and 256 blocks vertically.
154 *
155 * - For figuring out alignment restrictions for a fast clear operation. Fast
156 * clear operations must always clear aligned multiples of 16 blocks
157 * horizontally and 32 blocks vertically.
158 *
159 * - For scaling down the coordinates sent through the render pipeline during
160 * a fast clear. X coordinates must be scaled down by 8 times the block
161 * width, and Y coordinates by 16 times the block height.
162 *
163 * - For scaling down the coordinates sent through the render pipeline during
164 * a "Render Target Resolve" operation. X coordinates must be scaled down
165 * by half the block width, and Y coordinates by half the block height.
166 */
167 void
168 intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
169 struct intel_mipmap_tree *mt,
170 unsigned *width_px, unsigned *height)
171 {
172 switch (mt->region->tiling) {
173 default:
174 assert(!"Non-MSRT MCS requires X or Y tiling");
175 /* In release builds, fall through */
176 case I915_TILING_Y:
177 *width_px = 32 / mt->cpp;
178 *height = 4;
179 break;
180 case I915_TILING_X:
181 *width_px = 64 / mt->cpp;
182 *height = 2;
183 }
184 }
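
/* A minimal usage sketch, assuming a hypothetical caller: round a
 * fast-clear rectangle up to the rules quoted above (aligned multiples of
 * 16 blocks horizontally and 32 blocks vertically).  The helper name and
 * the x1/y1 parameters are illustrative only, not part of the driver.
 */
#if 0
static void
example_align_fast_clear_rect(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned *x1, unsigned *y1)
{
   unsigned block_w, block_h;

   intel_get_non_msrt_mcs_alignment(intel, mt, &block_w, &block_h);

   /* E.g. a 32bpp Y-tiled RT has 8x4-pixel blocks, so the rectangle is
    * aligned to 16*8 = 128 pixels in x and 32*4 = 128 pixels in y.
    */
   *x1 = ALIGN(*x1, 16 * block_w);
   *y1 = ALIGN(*y1, 32 * block_h);
}
#endif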
185
186
187 /**
188 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
189 * can be used.
190 *
191 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
192 * beneath the "Fast Color Clear" bullet (p326):
193 *
194 * - Support is limited to tiled render targets.
195 * - Support is for non-mip-mapped and non-array surface types only.
196 *
197 * And then later, on p327:
198 *
199 * - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
200 * 64bpp, and 128bpp.
201 */
202 bool
203 intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
204 struct intel_mipmap_tree *mt)
205 {
206 #ifdef I915
207 /* MCS is not supported on the i915 (pre-Gen4) driver */
208 return false;
209 #else
210 struct brw_context *brw = brw_context(&intel->ctx);
211
212 /* MCS support does not exist prior to Gen7 */
213 if (intel->gen < 7)
214 return false;
215
216 /* MCS is only supported for color buffers */
217 switch (_mesa_get_format_base_format(mt->format)) {
218 case GL_DEPTH_COMPONENT:
219 case GL_DEPTH_STENCIL:
220 case GL_STENCIL_INDEX:
221 return false;
222 }
223
224 if (mt->region->tiling != I915_TILING_X &&
225 mt->region->tiling != I915_TILING_Y)
226 return false;
227 if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
228 return false;
229 if (mt->first_level != 0 || mt->last_level != 0)
230 return false;
231 if (mt->physical_depth0 != 1)
232 return false;
233
234 /* There's no point in using an MCS buffer if the surface isn't in a
235 * renderable format.
236 */
237 if (!brw->format_supported_as_render_target[mt->format])
238 return false;
239
240 return true;
241 #endif
242 }
243
244
245 /**
246 * @param for_bo Indicates that the caller is
247 * intel_miptree_create_for_bo(). If true, then do not create
248 * \c stencil_mt.
249 */
250 struct intel_mipmap_tree *
251 intel_miptree_create_layout(struct intel_context *intel,
252 GLenum target,
253 gl_format format,
254 GLuint first_level,
255 GLuint last_level,
256 GLuint width0,
257 GLuint height0,
258 GLuint depth0,
259 bool for_bo,
260 GLuint num_samples)
261 {
262 struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
263
264 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
265 _mesa_lookup_enum_by_nr(target),
266 _mesa_get_format_name(format),
267 first_level, last_level, mt);
268
269 mt->target = target_to_target(target);
270 mt->format = format;
271 mt->first_level = first_level;
272 mt->last_level = last_level;
273 mt->logical_width0 = width0;
274 mt->logical_height0 = height0;
275 mt->logical_depth0 = depth0;
276 #ifndef I915
277 mt->mcs_state = INTEL_MCS_STATE_NONE;
278 #endif
279
280 /* The cpp is bytes per (1, blockheight)-sized block for compressed
281 * textures. This is why you'll see divides by blockwidth all over
282 */
283 unsigned bw, bh;
284 _mesa_get_format_block_size(format, &bw, &bh);
285 assert(_mesa_get_format_bytes(mt->format) % bw == 0);
286 mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
287
288 mt->num_samples = num_samples;
289 mt->compressed = _mesa_is_format_compressed(format);
290 mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
291 mt->refcount = 1;
292
293 if (num_samples > 1) {
294 /* Adjust width/height/depth for MSAA */
295 mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
296 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
297 /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
298 *
299 * "Any of the other messages (sample*, LOD, load4) used with a
300 * (4x) multisampled surface will in-effect sample a surface with
301 * double the height and width as that indicated in the surface
302 * state. Each pixel position on the original-sized surface is
303 * replaced with a 2x2 of samples with the following arrangement:
304 *
305 * sample 0 sample 2
306 * sample 1 sample 3"
307 *
308 * Thus, when sampling from a multisampled texture, it behaves as
309 * though the layout in memory for (x,y,sample) is:
310 *
311 * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
312 * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
313 *
314 * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
315 * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
316 *
317 * However, the actual layout of multisampled data in memory is:
318 *
319 * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
320 * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
321 *
322 * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
323 * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
324 *
325 * This pattern repeats for each 2x2 pixel block.
326 *
327 * As a result, when calculating the size of our 4-sample buffer for
328 * an odd width or height, we have to align before scaling up because
329 * sample 3 is in that bottom right 2x2 block.
330 */
331 switch (num_samples) {
332 case 4:
333 width0 = ALIGN(width0, 2) * 2;
334 height0 = ALIGN(height0, 2) * 2;
335 break;
336 case 8:
337 width0 = ALIGN(width0, 2) * 4;
338 height0 = ALIGN(height0, 2) * 2;
339 break;
340 default:
341 /* num_samples should already have been quantized to 0, 1, 4, or
342 * 8.
343 */
344 assert(false);
345 }
346 } else {
347 /* Non-interleaved */
348 depth0 *= num_samples;
349 }
350 }
351
352 /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
353 * use it elsewhere?
354 */
355 switch (mt->msaa_layout) {
356 case INTEL_MSAA_LAYOUT_NONE:
357 case INTEL_MSAA_LAYOUT_IMS:
358 mt->array_spacing_lod0 = false;
359 break;
360 case INTEL_MSAA_LAYOUT_UMS:
361 case INTEL_MSAA_LAYOUT_CMS:
362 mt->array_spacing_lod0 = true;
363 break;
364 }
365
366 if (target == GL_TEXTURE_CUBE_MAP) {
367 assert(depth0 == 1);
368 depth0 = 6;
369 }
370
371 mt->physical_width0 = width0;
372 mt->physical_height0 = height0;
373 mt->physical_depth0 = depth0;
374
375 if (!for_bo &&
376 _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
377 (intel->must_use_separate_stencil ||
378 (intel->has_separate_stencil &&
379 intel->vtbl.is_hiz_depth_format(intel, format)))) {
380 mt->stencil_mt = intel_miptree_create(intel,
381 mt->target,
382 MESA_FORMAT_S8,
383 mt->first_level,
384 mt->last_level,
385 mt->logical_width0,
386 mt->logical_height0,
387 mt->logical_depth0,
388 true,
389 num_samples,
390 INTEL_MIPTREE_TILING_ANY);
391 if (!mt->stencil_mt) {
392 intel_miptree_release(&mt);
393 return NULL;
394 }
395
396 /* Fix up the Z miptree format for how we're splitting out separate
397 * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
398 */
399 if (mt->format == MESA_FORMAT_S8_Z24) {
400 mt->format = MESA_FORMAT_X8_Z24;
401 } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
402 mt->format = MESA_FORMAT_Z32_FLOAT;
403 mt->cpp = 4;
404 } else {
405 _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
406 _mesa_get_format_name(mt->format));
407 }
408 }
409
410 intel_get_texture_alignment_unit(intel, mt->format,
411 &mt->align_w, &mt->align_h);
412
413 #ifdef I915
415 if (intel->is_945)
416 i945_miptree_layout(mt);
417 else
418 i915_miptree_layout(mt);
419 #else
420 brw_miptree_layout(intel, mt);
421 #endif
422
423 return mt;
424 }
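
/* A sketch of the IMS (x, y, sample) -> memory mapping described in the
 * comment above, for the 4x case only; the helper is hypothetical and
 * simply encodes the 2x2-pixel-block pattern quoted from the PRM.
 */
#if 0
static void
example_ims_4x_position(unsigned x, unsigned y, unsigned sample,
                        unsigned *mem_x, unsigned *mem_y)
{
   /* Bit 0 of the sample index selects the right half of the pixel
    * block's 4x4 memory footprint; bit 1 selects the bottom half.
    */
   *mem_x = (x & ~1u) * 2 + (x & 1) + 2 * (sample & 1);
   *mem_y = (y & ~1u) * 2 + (y & 1) + 2 * ((sample >> 1) & 1);
}
#endif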
425
426 /**
427 * \brief Helper function for intel_miptree_create().
428 */
429 static uint32_t
430 intel_miptree_choose_tiling(struct intel_context *intel,
431 gl_format format,
432 uint32_t width0,
433 uint32_t num_samples,
434 enum intel_miptree_tiling_mode requested,
435 struct intel_mipmap_tree *mt)
436 {
437
438 if (format == MESA_FORMAT_S8) {
439 /* The stencil buffer is W tiled. However, we request from the kernel a
440 * non-tiled buffer because the GTT is incapable of W fencing.
441 */
442 return I915_TILING_NONE;
443 }
444
445 /* Some usages may want only one type of tiling, like depth miptrees (Y
446 * tiled), or temporary BOs for uploading data once (linear).
447 */
448 switch (requested) {
449 case INTEL_MIPTREE_TILING_ANY:
450 break;
451 case INTEL_MIPTREE_TILING_Y:
452 return I915_TILING_Y;
453 case INTEL_MIPTREE_TILING_NONE:
454 return I915_TILING_NONE;
455 }
456
457 if (num_samples > 1) {
458 /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
459 * Surface"):
460 *
461 * [DevSNB+]: For multi-sample render targets, this field must be
462 * 1. MSRTs can only be tiled.
463 *
464 * Our usual reason for preferring X tiling (fast blits using the
465 * blitting engine) doesn't apply to MSAA, since we'll generally be
466 * downsampling or upsampling when blitting between the MSAA buffer
467 * and another buffer, and the blitting engine doesn't support that.
468 * So use Y tiling, since it makes better use of the cache.
469 */
470 return I915_TILING_Y;
471 }
472
473 GLenum base_format = _mesa_get_format_base_format(format);
474 if (intel->gen >= 4 &&
475 (base_format == GL_DEPTH_COMPONENT ||
476 base_format == GL_DEPTH_STENCIL_EXT))
477 return I915_TILING_Y;
478
479 int minimum_pitch = mt->total_width * mt->cpp;
480
481 /* If the width is much smaller than a tile, don't bother tiling. */
482 if (minimum_pitch < 64)
483 return I915_TILING_NONE;
484
485 if (ALIGN(minimum_pitch, 512) >= 32768) {
486 perf_debug("%dx%d miptree too large to blit, falling back to untiled",
487 mt->total_width, mt->total_height);
488 return I915_TILING_NONE;
489 }
490
491 /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
492 if (intel->gen < 6)
493 return I915_TILING_X;
494
495 return I915_TILING_Y | I915_TILING_X;
496 }
497
498 struct intel_mipmap_tree *
499 intel_miptree_create(struct intel_context *intel,
500 GLenum target,
501 gl_format format,
502 GLuint first_level,
503 GLuint last_level,
504 GLuint width0,
505 GLuint height0,
506 GLuint depth0,
507 bool expect_accelerated_upload,
508 GLuint num_samples,
509 enum intel_miptree_tiling_mode requested_tiling)
510 {
511 struct intel_mipmap_tree *mt;
512 gl_format tex_format = format;
513 gl_format etc_format = MESA_FORMAT_NONE;
514 GLuint total_width, total_height;
515
516 if (!intel->is_baytrail) {
517 switch (format) {
518 case MESA_FORMAT_ETC1_RGB8:
519 format = MESA_FORMAT_RGBX8888_REV;
520 break;
521 case MESA_FORMAT_ETC2_RGB8:
522 format = MESA_FORMAT_RGBX8888_REV;
523 break;
524 case MESA_FORMAT_ETC2_SRGB8:
525 case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
526 case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
527 format = MESA_FORMAT_SARGB8;
528 break;
529 case MESA_FORMAT_ETC2_RGBA8_EAC:
530 case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
531 format = MESA_FORMAT_RGBA8888_REV;
532 break;
533 case MESA_FORMAT_ETC2_R11_EAC:
534 format = MESA_FORMAT_R16;
535 break;
536 case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
537 format = MESA_FORMAT_SIGNED_R16;
538 break;
539 case MESA_FORMAT_ETC2_RG11_EAC:
540 format = MESA_FORMAT_GR1616;
541 break;
542 case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
543 format = MESA_FORMAT_SIGNED_GR1616;
544 break;
545 default:
546 /* Non ETC1 / ETC2 format */
547 break;
548 }
549 }
550
551 etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
552
553 mt = intel_miptree_create_layout(intel, target, format,
554 first_level, last_level, width0,
555 height0, depth0,
556 false, num_samples);
557 /*
558 * pitch == 0 || height == 0 indicates the null texture
559 */
560 if (!mt || !mt->total_width || !mt->total_height) {
561 intel_miptree_release(&mt);
562 return NULL;
563 }
564
565 total_width = mt->total_width;
566 total_height = mt->total_height;
567
568 if (format == MESA_FORMAT_S8) {
569 /* Align to size of W tile, 64x64. */
570 total_width = ALIGN(total_width, 64);
571 total_height = ALIGN(total_height, 64);
572 }
573
574 uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
575 num_samples, requested_tiling,
576 mt);
577 bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
578
579 mt->etc_format = etc_format;
580 mt->region = intel_region_alloc(intel->intelScreen,
581 y_or_x ? I915_TILING_Y : tiling,
582 mt->cpp,
583 total_width,
584 total_height,
585 expect_accelerated_upload);
586
587 /* If the region is too large to fit in the aperture, we need to use the
588 * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
589 * so we need to fall back to X.
590 */
591 if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
592 perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
593 mt->total_width, mt->total_height);
594 intel_region_release(&mt->region);
595
596 mt->region = intel_region_alloc(intel->intelScreen,
597 I915_TILING_X,
598 mt->cpp,
599 total_width,
600 total_height,
601 expect_accelerated_upload);
602 }
603
604 mt->offset = 0;
605
606 if (!mt->region) {
607 intel_miptree_release(&mt);
608 return NULL;
609 }
610
611 return mt;
612 }
613
614 struct intel_mipmap_tree *
615 intel_miptree_create_for_bo(struct intel_context *intel,
616 drm_intel_bo *bo,
617 gl_format format,
618 uint32_t offset,
619 uint32_t width,
620 uint32_t height,
621 int pitch,
622 uint32_t tiling)
623 {
624 struct intel_mipmap_tree *mt;
625
626 struct intel_region *region = calloc(1, sizeof(*region));
627 if (!region)
628 return NULL;
629
630 /* Nothing will be able to use this miptree with the BO if the offset isn't
631 * aligned.
632 */
633 if (tiling != I915_TILING_NONE)
634 assert(offset % 4096 == 0);
635
636 /* miptrees can't handle negative pitch. If you need flipping of images,
637 * that's outside of the scope of the mt.
638 */
639 assert(pitch >= 0);
640
641 mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
642 0, 0,
643 width, height, 1,
644 true, 0 /* num_samples */);
645 if (!mt)
646 return mt;
647
648 region->cpp = mt->cpp;
649 region->width = width;
650 region->height = height;
651 region->pitch = pitch;
652 region->refcount = 1;
653 drm_intel_bo_reference(bo);
654 region->bo = bo;
655 region->tiling = tiling;
656
657 mt->region = region;
658 mt->offset = offset;
659
660 return mt;
661 }
662
663
664 /**
665 * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
666 *
667 * For a multisample DRI2 buffer, this wraps the given region with
668 * a singlesample miptree, then creates a multisample miptree into which the
669 * singlesample miptree is embedded as a child.
670 */
671 struct intel_mipmap_tree*
672 intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
673 unsigned dri_attachment,
674 gl_format format,
675 uint32_t num_samples,
676 struct intel_region *region)
677 {
678 struct intel_mipmap_tree *singlesample_mt = NULL;
679 struct intel_mipmap_tree *multisample_mt = NULL;
680
681 /* Only the front and back buffers, which are color buffers, are shared
682 * through DRI2.
683 */
684 assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
685 dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
686 dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
687 assert(_mesa_get_format_base_format(format) == GL_RGB ||
688 _mesa_get_format_base_format(format) == GL_RGBA);
689
690 singlesample_mt = intel_miptree_create_for_bo(intel,
691 region->bo,
692 format,
693 0,
694 region->width,
695 region->height,
696 region->pitch,
697 region->tiling);
698 if (!singlesample_mt)
699 return NULL;
700 singlesample_mt->region->name = region->name;
701
702 if (num_samples == 0)
703 return singlesample_mt;
704
705 multisample_mt = intel_miptree_create_for_renderbuffer(intel,
706 format,
707 region->width,
708 region->height,
709 num_samples);
710 if (!multisample_mt) {
711 intel_miptree_release(&singlesample_mt);
712 return NULL;
713 }
714
715 multisample_mt->singlesample_mt = singlesample_mt;
716 multisample_mt->need_downsample = false;
717
718 if (intel->is_front_buffer_rendering &&
719 (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
720 dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
721 intel_miptree_upsample(intel, multisample_mt);
722 }
723
724 return multisample_mt;
725 }
726
727 struct intel_mipmap_tree*
728 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
729 gl_format format,
730 uint32_t width,
731 uint32_t height,
732 uint32_t num_samples)
733 {
734 struct intel_mipmap_tree *mt;
735 uint32_t depth = 1;
736 bool ok;
737
738 mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
739 width, height, depth, true, num_samples,
740 INTEL_MIPTREE_TILING_ANY);
741 if (!mt)
742 goto fail;
743
744 if (intel->vtbl.is_hiz_depth_format(intel, format)) {
745 ok = intel_miptree_alloc_hiz(intel, mt);
746 if (!ok)
747 goto fail;
748 }
749
750 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
751 ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
752 if (!ok)
753 goto fail;
754 }
755
756 return mt;
757
758 fail:
759 intel_miptree_release(&mt);
760 return NULL;
761 }
762
763 void
764 intel_miptree_reference(struct intel_mipmap_tree **dst,
765 struct intel_mipmap_tree *src)
766 {
767 if (*dst == src)
768 return;
769
770 intel_miptree_release(dst);
771
772 if (src) {
773 src->refcount++;
774 DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
775 }
776
777 *dst = src;
778 }
779
780
781 void
782 intel_miptree_release(struct intel_mipmap_tree **mt)
783 {
784 if (!*mt)
785 return;
786
787 DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
788 if (--(*mt)->refcount <= 0) {
789 GLuint i;
790
791 DBG("%s deleting %p\n", __FUNCTION__, *mt);
792
793 intel_region_release(&((*mt)->region));
794 intel_miptree_release(&(*mt)->stencil_mt);
795 intel_miptree_release(&(*mt)->hiz_mt);
796 #ifndef I915
797 intel_miptree_release(&(*mt)->mcs_mt);
798 #endif
799 intel_miptree_release(&(*mt)->singlesample_mt);
800 intel_resolve_map_clear(&(*mt)->hiz_map);
801
802 for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
803 free((*mt)->level[i].slice);
804 }
805
806 free(*mt);
807 }
808 *mt = NULL;
809 }
810
811 void
812 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
813 int *width, int *height, int *depth)
814 {
815 switch (image->TexObject->Target) {
816 case GL_TEXTURE_1D_ARRAY:
817 *width = image->Width;
818 *height = 1;
819 *depth = image->Height;
820 break;
821 default:
822 *width = image->Width;
823 *height = image->Height;
824 *depth = image->Depth;
825 break;
826 }
827 }
828
829 /**
830 * Can the image be pulled into a unified mipmap tree? This mirrors
831 * the completeness test in a lot of ways.
832 *
833 * Not sure whether I want to pass gl_texture_image here.
834 */
835 bool
836 intel_miptree_match_image(struct intel_mipmap_tree *mt,
837 struct gl_texture_image *image)
838 {
839 struct intel_texture_image *intelImage = intel_texture_image(image);
840 GLuint level = intelImage->base.Base.Level;
841 int width, height, depth;
842
843 /* glTexImage* choose the texture object based on the target passed in, and
844 * objects can't change targets over their lifetimes, so this should be
845 * true.
846 */
847 assert(target_to_target(image->TexObject->Target) == mt->target);
848
849 gl_format mt_format = mt->format;
850 if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
851 mt_format = MESA_FORMAT_S8_Z24;
852 if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
853 mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
854 if (mt->etc_format != MESA_FORMAT_NONE)
855 mt_format = mt->etc_format;
856
857 if (image->TexFormat != mt_format)
858 return false;
859
860 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
861
862 if (mt->target == GL_TEXTURE_CUBE_MAP)
863 depth = 6;
864
865 /* Test image dimensions against the base level image adjusted for
866 * minification. This will also catch images not present in the
867 * tree, changed targets, etc.
868 */
869 if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
870 mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
871 /* nonzero level here is always bogus */
872 assert(level == 0);
873
874 if (width != mt->logical_width0 ||
875 height != mt->logical_height0 ||
876 depth != mt->logical_depth0) {
877 return false;
878 }
879 }
880 else {
881 /* all normal textures, renderbuffers, etc */
882 if (width != mt->level[level].width ||
883 height != mt->level[level].height ||
884 depth != mt->level[level].depth) {
885 return false;
886 }
887 }
888
889 if (image->NumSamples != mt->num_samples)
890 return false;
891
892 return true;
893 }
894
895
896 void
897 intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
898 GLuint level,
899 GLuint x, GLuint y,
900 GLuint w, GLuint h, GLuint d)
901 {
902 mt->level[level].width = w;
903 mt->level[level].height = h;
904 mt->level[level].depth = d;
905 mt->level[level].level_x = x;
906 mt->level[level].level_y = y;
907
908 DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
909 level, w, h, d, x, y);
910
911 assert(mt->level[level].slice == NULL);
912
913 mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
914 mt->level[level].slice[0].x_offset = mt->level[level].level_x;
915 mt->level[level].slice[0].y_offset = mt->level[level].level_y;
916 }
917
918
919 void
920 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
921 GLuint level, GLuint img,
922 GLuint x, GLuint y)
923 {
924 if (img == 0 && level == 0)
925 assert(x == 0 && y == 0);
926
927 assert(img < mt->level[level].depth);
928
929 mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
930 mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
931
932 DBG("%s level %d img %d pos %d,%d\n",
933 __FUNCTION__, level, img,
934 mt->level[level].slice[img].x_offset,
935 mt->level[level].slice[img].y_offset);
936 }
937
938 void
939 intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
940 GLuint level, GLuint slice,
941 GLuint *x, GLuint *y)
942 {
943 assert(slice < mt->level[level].depth);
944
945 *x = mt->level[level].slice[slice].x_offset;
946 *y = mt->level[level].slice[slice].y_offset;
947 }
948
949 /**
950 * Rendering with tiled buffers requires that the base address of the buffer
951 * be aligned to a page boundary. For renderbuffers, and sometimes with
952 * textures, we may want the surface to point at a texture image level that
953 * isn't at a page boundary.
954 *
955 * This function returns an appropriately-aligned base offset
956 * according to the tiling restrictions, plus any required x/y offset
957 * from there.
958 */
959 uint32_t
960 intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
961 GLuint level, GLuint slice,
962 uint32_t *tile_x,
963 uint32_t *tile_y)
964 {
965 struct intel_region *region = mt->region;
966 uint32_t x, y;
967 uint32_t mask_x, mask_y;
968
969 intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
970 intel_miptree_get_image_offset(mt, level, slice, &x, &y);
971
972 *tile_x = x & mask_x;
973 *tile_y = y & mask_y;
974
975 return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
976 false);
977 }
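
/* A minimal caller sketch (name hypothetical): the page-aligned offset
 * returned here is added to the BO's base address in the surface state,
 * while the sub-tile position left in tile_x/tile_y is programmed
 * separately.
 */
#if 0
static uint32_t
example_surface_base(struct intel_mipmap_tree *mt,
                     GLuint level, GLuint slice,
                     uint32_t *tile_x, uint32_t *tile_y)
{
   /* Surface base = bo address + this return value. */
   return intel_miptree_get_tile_offsets(mt, level, slice, tile_x, tile_y);
}
#endif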
978
979 static void
980 intel_miptree_copy_slice_sw(struct intel_context *intel,
981 struct intel_mipmap_tree *dst_mt,
982 struct intel_mipmap_tree *src_mt,
983 int level,
984 int slice,
985 int width,
986 int height)
987 {
988 void *src, *dst;
989 int src_stride, dst_stride;
990 int cpp = dst_mt->cpp;
991
992 intel_miptree_map(intel, src_mt,
993 level, slice,
994 0, 0,
995 width, height,
996 GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
997 &src, &src_stride);
998
999 intel_miptree_map(intel, dst_mt,
1000 level, slice,
1001 0, 0,
1002 width, height,
1003 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
1004 BRW_MAP_DIRECT_BIT,
1005 &dst, &dst_stride);
1006
1007 DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
1008 _mesa_get_format_name(src_mt->format),
1009 src_mt, src, src_stride,
1010 _mesa_get_format_name(dst_mt->format),
1011 dst_mt, dst, dst_stride,
1012 width, height);
1013
1014 int row_size = cpp * width;
1015 if (src_stride == row_size &&
1016 dst_stride == row_size) {
1017 memcpy(dst, src, row_size * height);
1018 } else {
1019 for (int i = 0; i < height; i++) {
1020 memcpy(dst, src, row_size);
1021 dst += dst_stride;
1022 src += src_stride;
1023 }
1024 }
1025
1026 intel_miptree_unmap(intel, dst_mt, level, slice);
1027 intel_miptree_unmap(intel, src_mt, level, slice);
1028
1029 /* Don't forget to copy the stencil data over, too. We could have skipped
1030 * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
1031 * shuffling the two data sources in/out of temporary storage instead of
1032 * the direct mapping we get this way.
1033 */
1034 if (dst_mt->stencil_mt) {
1035 assert(src_mt->stencil_mt);
1036 intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
1037 level, slice, width, height);
1038 }
1039 }
1040
1041 static void
1042 intel_miptree_copy_slice(struct intel_context *intel,
1043 struct intel_mipmap_tree *dst_mt,
1044 struct intel_mipmap_tree *src_mt,
1045 int level,
1046 int face,
1047 int depth)
1048
1049 {
1050 gl_format format = src_mt->format;
1051 uint32_t width = src_mt->level[level].width;
1052 uint32_t height = src_mt->level[level].height;
1053 int slice;
1054
1055 if (face > 0)
1056 slice = face;
1057 else
1058 slice = depth;
1059
1060 assert(depth < src_mt->level[level].depth);
1061 assert(src_mt->format == dst_mt->format);
1062
1063 if (dst_mt->compressed) {
1064 height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
1065 width = ALIGN(width, dst_mt->align_w);
1066 }
1067
1068 /* If it's a packed depth/stencil buffer with separate stencil, the blit
1069 * below won't apply since we can't do the depth's Y tiling or the
1070 * stencil's W tiling in the blitter.
1071 */
1072 if (src_mt->stencil_mt) {
1073 intel_miptree_copy_slice_sw(intel,
1074 dst_mt, src_mt,
1075 level, slice,
1076 width, height);
1077 return;
1078 }
1079
1080 uint32_t dst_x, dst_y, src_x, src_y;
1081 intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
1082 intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
1083
1084 DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
1085 _mesa_get_format_name(src_mt->format),
1086 src_mt, src_x, src_y, src_mt->region->pitch,
1087 _mesa_get_format_name(dst_mt->format),
1088 dst_mt, dst_x, dst_y, dst_mt->region->pitch,
1089 width, height);
1090
1091 if (!intel_miptree_blit(intel,
1092 src_mt, level, slice, 0, 0, false,
1093 dst_mt, level, slice, 0, 0, false,
1094 width, height, GL_COPY)) {
1095 perf_debug("miptree validate blit for %s failed\n",
1096 _mesa_get_format_name(format));
1097
1098 intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
1099 width, height);
1100 }
1101 }
1102
1103 /**
1104 * Copies the image's current data to the given miptree, and associates that
1105 * miptree with the image.
1106 *
1107 * If \c invalidate is true, then the actual image data does not need to be
1108 * copied, but the image still needs to be associated to the new miptree (this
1109 * is set to true if we're about to clear the image).
1110 */
1111 void
1112 intel_miptree_copy_teximage(struct intel_context *intel,
1113 struct intel_texture_image *intelImage,
1114 struct intel_mipmap_tree *dst_mt,
1115 bool invalidate)
1116 {
1117 struct intel_mipmap_tree *src_mt = intelImage->mt;
1118 struct intel_texture_object *intel_obj =
1119 intel_texture_object(intelImage->base.Base.TexObject);
1120 int level = intelImage->base.Base.Level;
1121 int face = intelImage->base.Base.Face;
1122 GLuint depth = intelImage->base.Base.Depth;
1123
1124 if (!invalidate) {
1125 for (int slice = 0; slice < depth; slice++) {
1126 intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
1127 }
1128 }
1129
1130 intel_miptree_reference(&intelImage->mt, dst_mt);
1131 intel_obj->needs_validate = true;
1132 }
1133
1134 bool
1135 intel_miptree_alloc_mcs(struct intel_context *intel,
1136 struct intel_mipmap_tree *mt,
1137 GLuint num_samples)
1138 {
1139 assert(intel->gen >= 7); /* MCS only used on Gen7+ */
1140 #ifdef I915
1141 return false;
1142 #else
1143 assert(mt->mcs_mt == NULL);
1144
1145 /* Choose the correct format for the MCS buffer. All that really matters
1146 * is that we allocate the right buffer size, since we'll always be
1147 * accessing this miptree using MCS-specific hardware mechanisms, which
1148 * infer the correct format based on num_samples.
1149 */
1150 gl_format format;
1151 switch (num_samples) {
1152 case 4:
1153 /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
1154 * each sample).
1155 */
1156 format = MESA_FORMAT_R8;
1157 break;
1158 case 8:
1159 /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
1160 * for each sample, plus 8 padding bits).
1161 */
1162 format = MESA_FORMAT_R_UINT32;
1163 break;
1164 default:
1165 assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
1166 return false;
1167 };
1168
1169 /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
1170 *
1171 * "The MCS surface must be stored as Tile Y."
1172 */
1173 mt->mcs_state = INTEL_MCS_STATE_MSAA;
1174 mt->mcs_mt = intel_miptree_create(intel,
1175 mt->target,
1176 format,
1177 mt->first_level,
1178 mt->last_level,
1179 mt->logical_width0,
1180 mt->logical_height0,
1181 mt->logical_depth0,
1182 true,
1183 0 /* num_samples */,
1184 INTEL_MIPTREE_TILING_Y);
1185
1186 /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
1187 *
1188 * When MCS buffer is enabled and bound to MSRT, it is required that it
1189 * is cleared prior to any rendering.
1190 *
1191 * Since we don't use the MCS buffer for any purpose other than rendering,
1192 * it makes sense to just clear it immediately upon allocation.
1193 *
1194 * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
1195 */
1196 void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
1197 memset(data, 0xff, mt->mcs_mt->region->bo->size);
1198 intel_miptree_unmap_raw(intel, mt->mcs_mt);
1199
1200 return mt->mcs_mt != NULL;
1201 #endif
1202 }
1203
1204 /**
1205 * Helper for intel_miptree_alloc_hiz() that sets
1206 * \c mt->level[level].slice[layer].has_hiz. Return true if and only if
1207 * \c has_hiz was set.
1208 */
1209 static bool
1210 intel_miptree_slice_enable_hiz(struct intel_context *intel,
1211 struct intel_mipmap_tree *mt,
1212 uint32_t level,
1213 uint32_t layer)
1214 {
1215 assert(mt->hiz_mt);
1216
1217 if (intel->is_haswell) {
1218 /* Disable HiZ for some slices to work around a hardware bug.
1219 *
1220 * Haswell hardware fails to respect
1221 * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y during HiZ
1222 * ambiguate operations. The failure is inconsistent and affected by
1223 * other GPU contexts. Running a heavy GPU workload in a separate
1224 * process causes the failure rate to drop to nearly 0.
1225 *
1226 * To workaround the bug, we enable HiZ only when we can guarantee that
1227 * the Depth Coordinate Offset fields will be set to 0. The function
1228 * brw_get_depthstencil_tile_masks() is used to calculate the fields,
1229 * and the function is sometimes called in such a way that the presence
1230 * of an attached stencil buffer changes the function's return value.
1231 *
1232 * The largest tile size considered by brw_get_depthstencil_tile_masks()
1233 * is that of the stencil buffer. Therefore, if this hiz slice's
1234 * corresponding depth slice has an offset that is aligned to the
1235 * stencil buffer tile size, 64x64 pixels, then
1236 * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y is set to 0.
1237 */
1238 uint32_t depth_x_offset = mt->level[level].slice[layer].x_offset;
1239 uint32_t depth_y_offset = mt->level[level].slice[layer].y_offset;
1240 if ((depth_x_offset & 63) || (depth_y_offset & 63)) {
1241 return false;
1242 }
1243 }
1244
1245 mt->level[level].slice[layer].has_hiz = true;
1246 return true;
1247 }
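
/* For example, a depth slice whose (x_offset, y_offset) is (128, 64) is
 * 64x64-aligned and keeps HiZ under this workaround, while a slice at
 * (8, 0) has HiZ disabled.
 */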
1248
1249
1250
1251 bool
1252 intel_miptree_alloc_hiz(struct intel_context *intel,
1253 struct intel_mipmap_tree *mt)
1254 {
1255 assert(mt->hiz_mt == NULL);
1256 mt->hiz_mt = intel_miptree_create(intel,
1257 mt->target,
1258 mt->format,
1259 mt->first_level,
1260 mt->last_level,
1261 mt->logical_width0,
1262 mt->logical_height0,
1263 mt->logical_depth0,
1264 true,
1265 mt->num_samples,
1266 INTEL_MIPTREE_TILING_ANY);
1267
1268 if (!mt->hiz_mt)
1269 return false;
1270
1271 /* Mark that all slices need a HiZ resolve. */
1272 struct intel_resolve_map *head = &mt->hiz_map;
1273 for (int level = mt->first_level; level <= mt->last_level; ++level) {
1274 for (int layer = 0; layer < mt->level[level].depth; ++layer) {
1275 if (!intel_miptree_slice_enable_hiz(intel, mt, level, layer))
1276 continue;
1277
1278 head->next = malloc(sizeof(*head->next));
1279 head->next->prev = head;
1280 head->next->next = NULL;
1281 head = head->next;
1282
1283 head->level = level;
1284 head->layer = layer;
1285 head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
1286 }
1287 }
1288
1289 return true;
1290 }
1291
1292 /**
1293 * Does the miptree slice have hiz enabled?
1294 */
1295 bool
1296 intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
1297 uint32_t level,
1298 uint32_t layer)
1299 {
1300 intel_miptree_check_level_layer(mt, level, layer);
1301 return mt->level[level].slice[layer].has_hiz;
1302 }
1303
1304 void
1305 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
1306 uint32_t level,
1307 uint32_t layer)
1308 {
1309 if (!intel_miptree_slice_has_hiz(mt, level, layer))
1310 return;
1311
1312 intel_resolve_map_set(&mt->hiz_map,
1313 level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
1314 }
1315
1316
1317 void
1318 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
1319 uint32_t level,
1320 uint32_t layer)
1321 {
1322 if (!intel_miptree_slice_has_hiz(mt, level, layer))
1323 return;
1324
1325 intel_resolve_map_set(&mt->hiz_map,
1326 level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
1327 }
1328
1329 static bool
1330 intel_miptree_slice_resolve(struct intel_context *intel,
1331 struct intel_mipmap_tree *mt,
1332 uint32_t level,
1333 uint32_t layer,
1334 enum gen6_hiz_op need)
1335 {
1336 intel_miptree_check_level_layer(mt, level, layer);
1337
1338 struct intel_resolve_map *item =
1339 intel_resolve_map_get(&mt->hiz_map, level, layer);
1340
1341 if (!item || item->need != need)
1342 return false;
1343
1344 intel_hiz_exec(intel, mt, level, layer, need);
1345 intel_resolve_map_remove(item);
1346 return true;
1347 }
1348
1349 bool
1350 intel_miptree_slice_resolve_hiz(struct intel_context *intel,
1351 struct intel_mipmap_tree *mt,
1352 uint32_t level,
1353 uint32_t layer)
1354 {
1355 return intel_miptree_slice_resolve(intel, mt, level, layer,
1356 GEN6_HIZ_OP_HIZ_RESOLVE);
1357 }
1358
1359 bool
1360 intel_miptree_slice_resolve_depth(struct intel_context *intel,
1361 struct intel_mipmap_tree *mt,
1362 uint32_t level,
1363 uint32_t layer)
1364 {
1365 return intel_miptree_slice_resolve(intel, mt, level, layer,
1366 GEN6_HIZ_OP_DEPTH_RESOLVE);
1367 }
1368
1369 static bool
1370 intel_miptree_all_slices_resolve(struct intel_context *intel,
1371 struct intel_mipmap_tree *mt,
1372 enum gen6_hiz_op need)
1373 {
1374 bool did_resolve = false;
1375 struct intel_resolve_map *i, *next;
1376
1377 for (i = mt->hiz_map.next; i; i = next) {
1378 next = i->next;
1379 if (i->need != need)
1380 continue;
1381
1382 intel_hiz_exec(intel, mt, i->level, i->layer, need);
1383 intel_resolve_map_remove(i);
1384 did_resolve = true;
1385 }
1386
1387 return did_resolve;
1388 }
1389
1390 bool
1391 intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
1392 struct intel_mipmap_tree *mt)
1393 {
1394 return intel_miptree_all_slices_resolve(intel, mt,
1395 GEN6_HIZ_OP_HIZ_RESOLVE);
1396 }
1397
1398 bool
1399 intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
1400 struct intel_mipmap_tree *mt)
1401 {
1402 return intel_miptree_all_slices_resolve(intel, mt,
1403 GEN6_HIZ_OP_DEPTH_RESOLVE);
1404 }
1405
1406 /**
1407 * \brief Get pointer offset into stencil buffer.
1408 *
1409 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
1410 * must decode the tile's layout in software.
1411 *
1412 * See
1413 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
1414 * Format.
1415 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
1416 *
1417 * Even though the returned offset is always positive, the return type is
1418 * signed due to
1419 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
1420 * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
1421 */
1422 static intptr_t
1423 intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
1424 {
1425 uint32_t tile_size = 4096;
1426 uint32_t tile_width = 64;
1427 uint32_t tile_height = 64;
1428 uint32_t row_size = 64 * stride;
1429
1430 uint32_t tile_x = x / tile_width;
1431 uint32_t tile_y = y / tile_height;
1432
1433 /* The byte's address relative to the tile's base address. */
1434 uint32_t byte_x = x % tile_width;
1435 uint32_t byte_y = y % tile_height;
1436
1437 uintptr_t u = tile_y * row_size
1438 + tile_x * tile_size
1439 + 512 * (byte_x / 8)
1440 + 64 * (byte_y / 8)
1441 + 32 * ((byte_y / 4) % 2)
1442 + 16 * ((byte_x / 4) % 2)
1443 + 8 * ((byte_y / 2) % 2)
1444 + 4 * ((byte_x / 2) % 2)
1445 + 2 * (byte_y % 2)
1446 + 1 * (byte_x % 2);
1447
1448 if (swizzled) {
1449 /* adjust for bit6 swizzling */
1450 if (((byte_x / 8) % 2) == 1) {
1451 if (((byte_y / 8) % 2) == 0) {
1452 u += 64;
1453 } else {
1454 u -= 64;
1455 }
1456 }
1457 }
1458
1459 return u;
1460 }
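
/* Worked example of the calculation above: with stride = 512 and
 * (x, y) = (70, 70), we get tile (1, 1) and intra-tile byte (6, 6), so
 *
 *    u = 1*64*512 + 1*4096 + 0 + 0 + 32 + 16 + 8 + 4 + 0 + 0 = 36924
 *
 * before any bit6 swizzle adjustment.
 */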
1461
1462 static void
1463 intel_miptree_updownsample(struct intel_context *intel,
1464 struct intel_mipmap_tree *src,
1465 struct intel_mipmap_tree *dst,
1466 unsigned width,
1467 unsigned height)
1468 {
1469 #ifndef I915
1470 int src_x0 = 0;
1471 int src_y0 = 0;
1472 int dst_x0 = 0;
1473 int dst_y0 = 0;
1474
1475 brw_blorp_blit_miptrees(intel,
1476 src, 0 /* level */, 0 /* layer */,
1477 dst, 0 /* level */, 0 /* layer */,
1478 src_x0, src_y0,
1479 width, height,
1480 dst_x0, dst_y0,
1481 width, height,
1482 false, false /*mirror x, y*/);
1483
1484 if (src->stencil_mt) {
1485 brw_blorp_blit_miptrees(intel,
1486 src->stencil_mt, 0 /* level */, 0 /* layer */,
1487 dst->stencil_mt, 0 /* level */, 0 /* layer */,
1488 src_x0, src_y0,
1489 width, height,
1490 dst_x0, dst_y0,
1491 width, height,
1492 false, false /*mirror x, y*/);
1493 }
1494 #endif /* I915 */
1495 }
1496
1497 static void
1498 assert_is_flat(struct intel_mipmap_tree *mt)
1499 {
1500 assert(mt->target == GL_TEXTURE_2D);
1501 assert(mt->first_level == 0);
1502 assert(mt->last_level == 0);
1503 }
1504
1505 /**
1506 * \brief Downsample from mt to mt->singlesample_mt.
1507 *
1508 * If the miptree needs no downsample, then skip.
1509 */
1510 void
1511 intel_miptree_downsample(struct intel_context *intel,
1512 struct intel_mipmap_tree *mt)
1513 {
1514 /* Only flat, renderbuffer-like miptrees are supported. */
1515 assert_is_flat(mt);
1516
1517 if (!mt->need_downsample)
1518 return;
1519 intel_miptree_updownsample(intel,
1520 mt, mt->singlesample_mt,
1521 mt->logical_width0,
1522 mt->logical_height0);
1523 mt->need_downsample = false;
1524 }
1525
1526 /**
1527 * \brief Upsample from mt->singlesample_mt to mt.
1528 *
1529 * The upsample is done unconditionally.
1530 */
1531 void
1532 intel_miptree_upsample(struct intel_context *intel,
1533 struct intel_mipmap_tree *mt)
1534 {
1535 /* Only flat, renderbuffer-like miptrees are supported. */
1536 assert_is_flat(mt);
1537 assert(!mt->need_downsample);
1538
1539 intel_miptree_updownsample(intel,
1540 mt->singlesample_mt, mt,
1541 mt->logical_width0,
1542 mt->logical_height0);
1543 }
1544
1545 void *
1546 intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
1547 {
1548 drm_intel_bo *bo = mt->region->bo;
1549
1550 if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
1551 if (drm_intel_bo_busy(bo)) {
1552 perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
1553 }
1554 }
1555
1556 intel_flush(&intel->ctx);
1557
1558 if (mt->region->tiling != I915_TILING_NONE)
1559 drm_intel_gem_bo_map_gtt(bo);
1560 else
1561 drm_intel_bo_map(bo, true);
1562
1563 return bo->virtual;
1564 }
1565
1566 void
1567 intel_miptree_unmap_raw(struct intel_context *intel,
1568 struct intel_mipmap_tree *mt)
1569 {
1570 drm_intel_bo_unmap(mt->region->bo);
1571 }
1572
1573 static void
1574 intel_miptree_map_gtt(struct intel_context *intel,
1575 struct intel_mipmap_tree *mt,
1576 struct intel_miptree_map *map,
1577 unsigned int level, unsigned int slice)
1578 {
1579 unsigned int bw, bh;
1580 void *base;
1581 unsigned int image_x, image_y;
1582 int x = map->x;
1583 int y = map->y;
1584
1585 /* For compressed formats, the stride is the number of bytes per
1586 * row of blocks. intel_miptree_get_image_offset() already does
1587 * the divide.
1588 */
1589 _mesa_get_format_block_size(mt->format, &bw, &bh);
1590 assert(y % bh == 0);
1591 y /= bh;
1592
1593 base = intel_miptree_map_raw(intel, mt) + mt->offset;
1594
1595 if (base == NULL)
1596 map->ptr = NULL;
1597 else {
1598 /* Note that in the case of cube maps, the caller must have passed the
1599 * slice number referencing the face.
1600 */
1601 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1602 x += image_x;
1603 y += image_y;
1604
1605 map->stride = mt->region->pitch;
1606 map->ptr = base + y * map->stride + x * mt->cpp;
1607 }
1608
1609 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
1610 map->x, map->y, map->w, map->h,
1611 mt, _mesa_get_format_name(mt->format),
1612 x, y, map->ptr, map->stride);
1613 }
1614
1615 static void
1616 intel_miptree_unmap_gtt(struct intel_context *intel,
1617 struct intel_mipmap_tree *mt,
1618 struct intel_miptree_map *map,
1619 unsigned int level,
1620 unsigned int slice)
1621 {
1622 intel_miptree_unmap_raw(intel, mt);
1623 }
1624
1625 static void
1626 intel_miptree_map_blit(struct intel_context *intel,
1627 struct intel_mipmap_tree *mt,
1628 struct intel_miptree_map *map,
1629 unsigned int level, unsigned int slice)
1630 {
1631 map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
1632 0, 0,
1633 map->w, map->h, 1,
1634 false, 0,
1635 INTEL_MIPTREE_TILING_NONE);
1636 if (!map->mt) {
1637 fprintf(stderr, "Failed to allocate blit temporary\n");
1638 goto fail;
1639 }
1640 map->stride = map->mt->region->pitch;
1641
1642 if (!intel_miptree_blit(intel,
1643 mt, level, slice,
1644 map->x, map->y, false,
1645 map->mt, 0, 0,
1646 0, 0, false,
1647 map->w, map->h, GL_COPY)) {
1648 fprintf(stderr, "Failed to blit\n");
1649 goto fail;
1650 }
1651
1652 intel_batchbuffer_flush(intel);
1653 map->ptr = intel_miptree_map_raw(intel, map->mt);
1654
1655 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
1656 map->x, map->y, map->w, map->h,
1657 mt, _mesa_get_format_name(mt->format),
1658 level, slice, map->ptr, map->stride);
1659
1660 return;
1661
1662 fail:
1663 intel_miptree_release(&map->mt);
1664 map->ptr = NULL;
1665 map->stride = 0;
1666 }
1667
1668 static void
1669 intel_miptree_unmap_blit(struct intel_context *intel,
1670 struct intel_mipmap_tree *mt,
1671 struct intel_miptree_map *map,
1672 unsigned int level,
1673 unsigned int slice)
1674 {
1675 struct gl_context *ctx = &intel->ctx;
1676
1677 intel_miptree_unmap_raw(intel, map->mt);
1678
1679 if (map->mode & GL_MAP_WRITE_BIT) {
1680 bool ok = intel_miptree_blit(intel,
1681 map->mt, 0, 0,
1682 0, 0, false,
1683 mt, level, slice,
1684 map->x, map->y, false,
1685 map->w, map->h, GL_COPY);
1686 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
1687 }
1688
1689 intel_miptree_release(&map->mt);
1690 }
1691
1692 static void
1693 intel_miptree_map_s8(struct intel_context *intel,
1694 struct intel_mipmap_tree *mt,
1695 struct intel_miptree_map *map,
1696 unsigned int level, unsigned int slice)
1697 {
1698 map->stride = map->w;
1699 map->buffer = map->ptr = malloc(map->stride * map->h);
1700 if (!map->buffer)
1701 return;
1702
1703 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
1704 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
1705 * invalidate is set, since we'll be writing the whole rectangle from our
1706 * temporary buffer back out.
1707 */
1708 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
1709 uint8_t *untiled_s8_map = map->ptr;
1710 uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
1711 unsigned int image_x, image_y;
1712
1713 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1714
1715 for (uint32_t y = 0; y < map->h; y++) {
1716 for (uint32_t x = 0; x < map->w; x++) {
1717 ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
1718 x + image_x + map->x,
1719 y + image_y + map->y,
1720 intel->has_swizzling);
1721 untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
1722 }
1723 }
1724
1725 intel_miptree_unmap_raw(intel, mt);
1726
1727 DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
1728 map->x, map->y, map->w, map->h,
1729 mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
1730 } else {
1731 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
1732 map->x, map->y, map->w, map->h,
1733 mt, map->ptr, map->stride);
1734 }
1735 }
1736
1737 static void
1738 intel_miptree_unmap_s8(struct intel_context *intel,
1739 struct intel_mipmap_tree *mt,
1740 struct intel_miptree_map *map,
1741 unsigned int level,
1742 unsigned int slice)
1743 {
1744 if (map->mode & GL_MAP_WRITE_BIT) {
1745 unsigned int image_x, image_y;
1746 uint8_t *untiled_s8_map = map->ptr;
1747 uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
1748
1749 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1750
1751 for (uint32_t y = 0; y < map->h; y++) {
1752 for (uint32_t x = 0; x < map->w; x++) {
1753 ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
1754 x + map->x,
1755 y + map->y,
1756 intel->has_swizzling);
1757 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
1758 }
1759 }
1760
1761 intel_miptree_unmap_raw(intel, mt);
1762 }
1763
1764 free(map->buffer);
1765 }
1766
1767 static void
1768 intel_miptree_map_etc(struct intel_context *intel,
1769 struct intel_mipmap_tree *mt,
1770 struct intel_miptree_map *map,
1771 unsigned int level,
1772 unsigned int slice)
1773 {
1774 assert(mt->etc_format != MESA_FORMAT_NONE);
1775 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
1776 assert(mt->format == MESA_FORMAT_RGBX8888_REV);
1777 }
1778
1779 assert(map->mode & GL_MAP_WRITE_BIT);
1780 assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
1781
1782 map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
1783 map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
1784 map->w, map->h, 1));
1785 map->ptr = map->buffer;
1786 }
1787
1788 static void
1789 intel_miptree_unmap_etc(struct intel_context *intel,
1790 struct intel_mipmap_tree *mt,
1791 struct intel_miptree_map *map,
1792 unsigned int level,
1793 unsigned int slice)
1794 {
1795 uint32_t image_x;
1796 uint32_t image_y;
1797 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1798
1799 image_x += map->x;
1800 image_y += map->y;
1801
1802 uint8_t *dst = intel_miptree_map_raw(intel, mt)
1803 + image_y * mt->region->pitch
1804 + image_x * mt->region->cpp;
1805
1806 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
1807 _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
1808 map->ptr, map->stride,
1809 map->w, map->h);
1810 else
1811 _mesa_unpack_etc2_format(dst, mt->region->pitch,
1812 map->ptr, map->stride,
1813 map->w, map->h, mt->etc_format);
1814
1815 intel_miptree_unmap_raw(intel, mt);
1816 free(map->buffer);
1817 }
1818
1819 /**
1820 * Mapping function for packed depth/stencil miptrees backed by real separate
1821 * miptrees for depth and stencil.
1822 *
1823 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
1824 * separate from the depth buffer. Yet at the GL API level, we have to expose
1825 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
1826 * be able to map that memory for texture storage and glReadPixels-type
1827 * operations. We give Mesa core that access by mallocing a temporary and
1828 * copying the data between the actual backing store and the temporary.
1829 */
1830 static void
1831 intel_miptree_map_depthstencil(struct intel_context *intel,
1832 struct intel_mipmap_tree *mt,
1833 struct intel_miptree_map *map,
1834 unsigned int level, unsigned int slice)
1835 {
1836 struct intel_mipmap_tree *z_mt = mt;
1837 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
1838 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
1839 int packed_bpp = map_z32f_x24s8 ? 8 : 4;
1840
1841 map->stride = map->w * packed_bpp;
1842 map->buffer = map->ptr = malloc(map->stride * map->h);
1843 if (!map->buffer)
1844 return;
1845
1846 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
1847 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
1848 * invalidate is set, since we'll be writing the whole rectangle from our
1849 * temporary buffer back out.
1850 */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

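/**
 * Counterpart of intel_miptree_map_depthstencil(): if the map was for
 * writing, scatter the packed temporary back out into the separate depth and
 * stencil miptrees, then free the temporary.
 */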
static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

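/**
 * Map a window of a single-sample miptree for CPU access, picking a strategy
 * based on the format and the hardware: dedicated wrappers for S8, ETC1/ETC2,
 * and packed depth/stencil; a blit to a linear temporary for reads from tiled
 * surfaces on LLC hardware and for tiled BOs too large to map through the
 * GTT; otherwise a direct GTT mapping.
 */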
static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            (mt->region->tiling == I915_TILING_X ||
             (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

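/**
 * Counterpart of intel_miptree_map_singlesample(): tear the mapping down via
 * the same path that created it (a non-NULL map->mt marks the blit path) and
 * release the map.
 */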
static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}

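/**
 * Map a multisample miptree by downsampling it into a single-sample miptree
 * (creating a throwaway one if the miptree does not already carry a
 * persistent singlesample_mt) and mapping that instead. Only flat,
 * renderbuffer-like miptrees are supported.
 */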
static void
intel_miptree_map_multisample(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level) != "
                    "(GL_TEXTURE_2D, 0, 0)");
      goto fail;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(intel,
                                               mt->format,
                                               mt->logical_width0,
                                               mt->logical_height0,
                                               0 /*num_samples*/);
      if (!mt->singlesample_mt)
         goto fail;

      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

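   /* Resolve the multisample contents into the single-sample view before
    * handing out a CPU pointer; writes are folded back into the multisample
    * miptree by intel_miptree_unmap_multisample() via
    * intel_miptree_upsample().
    */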
   intel_miptree_downsample(intel, mt);
   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}

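/**
 * Counterpart of intel_miptree_map_multisample(): unmap the single-sample
 * view, upsample it back into the multisample miptree if the map was for
 * writing, and release any temporary single-sample miptree.
 */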
static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}

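/**
 * Map a window (x, y, w, h) of the given level/slice for CPU access,
 * returning a pointer and stride through out_ptr and out_stride (out_ptr is
 * NULL on failure). Dispatches on sample count to the single- or multisample
 * implementation above.
 */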
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(intel, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(intel, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}

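/**
 * Release a mapping made by intel_miptree_map(), writing back any data that
 * was staged in a temporary buffer.
 */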
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(intel, mt, level, slice);
   else
      intel_miptree_unmap_multisample(intel, mt, level, slice);
}