intel: Conditionally compile mcs-related code for i965 only.
[mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <GL/gl.h>
29 #include <GL/internal/dri_interface.h>
30
31 #include "intel_batchbuffer.h"
32 #include "intel_chipset.h"
33 #include "intel_context.h"
34 #include "intel_mipmap_tree.h"
35 #include "intel_regions.h"
36 #include "intel_resolve_map.h"
37 #include "intel_tex_layout.h"
38 #include "intel_tex.h"
39 #include "intel_blit.h"
40
41 #ifndef I915
42 #include "brw_blorp.h"
43 #endif
44
45 #include "main/enums.h"
46 #include "main/formats.h"
47 #include "main/glformats.h"
48 #include "main/texcompress_etc.h"
49 #include "main/teximage.h"
50
51 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
52
53 static GLenum
54 target_to_target(GLenum target)
55 {
56 switch (target) {
57 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
58 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
59 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
60 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
61 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
62 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
63 return GL_TEXTURE_CUBE_MAP_ARB;
64 default:
65 return target;
66 }
67 }
68
69
70 /**
71 * Determine which MSAA layout should be used by the MSAA surface being
72 * created, based on the chip generation and the surface type.
73 */
74 static enum intel_msaa_layout
75 compute_msaa_layout(struct intel_context *intel, gl_format format, GLenum target)
76 {
77 /* Prior to Gen7, all MSAA surfaces used IMS layout. */
78 if (intel->gen < 7)
79 return INTEL_MSAA_LAYOUT_IMS;
80
81 /* In Gen7, IMS layout is only used for depth and stencil buffers. */
82 switch (_mesa_get_format_base_format(format)) {
83 case GL_DEPTH_COMPONENT:
84 case GL_STENCIL_INDEX:
85 case GL_DEPTH_STENCIL:
86 return INTEL_MSAA_LAYOUT_IMS;
87 default:
88 /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
89 *
90 * This field must be set to 0 for all SINT MSRTs when all RT channels
91 * are not written
92 *
93 * In practice this means that we have to disable MCS for all signed
94 * integer MSAA buffers. The alternative, to disable MCS only when one
95 * of the render target channels is disabled, is impractical because it
96 * would require converting between CMS and UMS MSAA layouts on the fly,
97 * which is expensive.
98 */
99 if (_mesa_get_format_datatype(format) == GL_INT) {
100 /* TODO: is this workaround needed for future chipsets? */
101 assert(intel->gen == 7);
102 return INTEL_MSAA_LAYOUT_UMS;
103 } else {
104 /* For now, if we're going to be texturing from this surface,
105 * force UMS, so that the shader doesn't have to do different things
106 * based on whether there's a multisample control surface that needs to be sampled first.
107 * We can't just blindly read the MCS surface in all cases because:
108 *
109 * From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
110 *
111 * If this field is disabled and the sampling engine <ld_mcs> message
112 * is issued on this surface, the MCS surface may be accessed. Software
113 * must ensure that the surface is defined to avoid GTT errors.
114 */
115 if (target == GL_TEXTURE_2D_MULTISAMPLE ||
116 target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
117 return INTEL_MSAA_LAYOUT_UMS;
118 } else {
119 return INTEL_MSAA_LAYOUT_CMS;
120 }
121 }
122 }
123 }
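
/* Summary of the decision above (an illustrative recap of this code, not a
 * PRM table):
 *
 *   gen < 7: any format, any target              -> IMS
 *   gen 7:   depth, stencil, or depth-stencil    -> IMS
 *   gen 7:   signed-integer color                -> UMS
 *   gen 7:   other color, multisample texture    -> UMS
 *   gen 7:   other color, renderbuffer-only use  -> CMS
 */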
124
125
126 /**
127 * @param for_bo Indicates that the caller is
128 * intel_miptree_create_for_bo(). If true, then do not create
129 * \c stencil_mt.
130 */
131 struct intel_mipmap_tree *
132 intel_miptree_create_layout(struct intel_context *intel,
133 GLenum target,
134 gl_format format,
135 GLuint first_level,
136 GLuint last_level,
137 GLuint width0,
138 GLuint height0,
139 GLuint depth0,
140 bool for_bo,
141 GLuint num_samples)
142 {
143 struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
144
145 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
146 _mesa_lookup_enum_by_nr(target),
147 _mesa_get_format_name(format),
148 first_level, last_level, mt);
149
150 mt->target = target_to_target(target);
151 mt->format = format;
152 mt->first_level = first_level;
153 mt->last_level = last_level;
154 mt->logical_width0 = width0;
155 mt->logical_height0 = height0;
156 mt->logical_depth0 = depth0;
157
158 /* The cpp is bytes per (1, blockheight)-sized block for compressed
159 * textures. This is why you'll see divides by blockheight all over.
160 */
161 unsigned bw, bh;
162 _mesa_get_format_block_size(format, &bw, &bh);
163 assert(_mesa_get_format_bytes(mt->format) % bw == 0);
164 mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
165
166 mt->num_samples = num_samples;
167 mt->compressed = _mesa_is_format_compressed(format);
168 mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
169 mt->refcount = 1;
170
171 if (num_samples > 1) {
172 /* Adjust width/height/depth for MSAA */
173 mt->msaa_layout = compute_msaa_layout(intel, format, mt->target);
174 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
175 /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
176 *
177 * "Any of the other messages (sample*, LOD, load4) used with a
178 * (4x) multisampled surface will in-effect sample a surface with
179 * double the height and width as that indicated in the surface
180 * state. Each pixel position on the original-sized surface is
181 * replaced with a 2x2 of samples with the following arrangement:
182 *
183 * sample 0 sample 2
184 * sample 1 sample 3"
185 *
186 * Thus, when sampling from a multisampled texture, it behaves as
187 * though the layout in memory for (x,y,sample) is:
188 *
189 * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
190 * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
191 *
192 * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
193 * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
194 *
195 * However, the actual layout of multisampled data in memory is:
196 *
197 * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
198 * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
199 *
200 * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
201 * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
202 *
203 * This pattern repeats for each 2x2 pixel block.
204 *
205 * As a result, when calculating the size of our 4-sample buffer for
206 * an odd width or height, we have to align before scaling up because
207 * sample 3 is in that bottom right 2x2 block.
208 */
209 switch (num_samples) {
210 case 4:
211 width0 = ALIGN(width0, 2) * 2;
212 height0 = ALIGN(height0, 2) * 2;
213 break;
214 case 8:
215 width0 = ALIGN(width0, 2) * 4;
216 height0 = ALIGN(height0, 2) * 2;
217 break;
218 default:
219 /* num_samples should already have been quantized to 0, 1, 4, or
220 * 8.
221 */
222 assert(false);
223 }
224 } else {
225 /* Non-interleaved */
226 depth0 *= num_samples;
227 }
228 }
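
/* Worked example for the IMS sizing above (illustrative): a 5x3 surface at
 * 4x is first aligned to 6x4 so that sample 3's bottom-right 2x2 block stays
 * addressable, then scaled to a 12x8 physical allocation; at 8x the same
 * surface becomes 24x8 (4x the aligned width, 2x the aligned height).
 */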
229
230 /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
231 * use it elsewhere?
232 */
233 switch (mt->msaa_layout) {
234 case INTEL_MSAA_LAYOUT_NONE:
235 case INTEL_MSAA_LAYOUT_IMS:
236 mt->array_spacing_lod0 = false;
237 break;
238 case INTEL_MSAA_LAYOUT_UMS:
239 case INTEL_MSAA_LAYOUT_CMS:
240 mt->array_spacing_lod0 = true;
241 break;
242 }
243
244 if (target == GL_TEXTURE_CUBE_MAP) {
245 assert(depth0 == 1);
246 depth0 = 6;
247 }
248
249 mt->physical_width0 = width0;
250 mt->physical_height0 = height0;
251 mt->physical_depth0 = depth0;
252
253 if (!for_bo &&
254 _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
255 (intel->must_use_separate_stencil ||
256 (intel->has_separate_stencil &&
257 intel->vtbl.is_hiz_depth_format(intel, format)))) {
258 mt->stencil_mt = intel_miptree_create(intel,
259 mt->target,
260 MESA_FORMAT_S8,
261 mt->first_level,
262 mt->last_level,
263 mt->logical_width0,
264 mt->logical_height0,
265 mt->logical_depth0,
266 true,
267 num_samples,
268 INTEL_MIPTREE_TILING_ANY);
269 if (!mt->stencil_mt) {
270 intel_miptree_release(&mt);
271 return NULL;
272 }
273
274 /* Fix up the Z miptree format for how we're splitting out separate
275 * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
276 */
277 if (mt->format == MESA_FORMAT_S8_Z24) {
278 mt->format = MESA_FORMAT_X8_Z24;
279 } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
280 mt->format = MESA_FORMAT_Z32_FLOAT;
281 mt->cpp = 4;
282 } else {
283 _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
284 _mesa_get_format_name(mt->format));
285 }
286 }
287
288 intel_get_texture_alignment_unit(intel, mt->format,
289 &mt->align_w, &mt->align_h);
290
291 #ifdef I915
292 /* 945-class chips lay out miptrees differently from 915-class ones. */
293 if (intel->is_945)
294 i945_miptree_layout(mt);
295 else
296 i915_miptree_layout(mt);
297 #else
298 brw_miptree_layout(intel, mt);
299 #endif
300
301 return mt;
302 }
303
304 /**
305 * \brief Helper function for intel_miptree_create().
306 */
307 static uint32_t
308 intel_miptree_choose_tiling(struct intel_context *intel,
309 gl_format format,
310 uint32_t width0,
311 uint32_t num_samples,
312 enum intel_miptree_tiling_mode requested,
313 struct intel_mipmap_tree *mt)
314 {
315
316 if (format == MESA_FORMAT_S8) {
317 /* The stencil buffer is W tiled. However, we request from the kernel a
318 * non-tiled buffer because the GTT is incapable of W fencing.
319 */
320 return I915_TILING_NONE;
321 }
322
323 /* Some usages may want only one type of tiling, like depth miptrees (Y
324 * tiled), or temporary BOs for uploading data once (linear).
325 */
326 switch (requested) {
327 case INTEL_MIPTREE_TILING_ANY:
328 break;
329 case INTEL_MIPTREE_TILING_Y:
330 return I915_TILING_Y;
331 case INTEL_MIPTREE_TILING_NONE:
332 return I915_TILING_NONE;
333 }
334
335 if (num_samples > 1) {
336 /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
337 * Surface"):
338 *
339 * [DevSNB+]: For multi-sample render targets, this field must be
340 * 1. MSRTs can only be tiled.
341 *
342 * Our usual reason for preferring X tiling (fast blits using the
343 * blitting engine) doesn't apply to MSAA, since we'll generally be
344 * downsampling or upsampling when blitting between the MSAA buffer
345 * and another buffer, and the blitting engine doesn't support that.
346 * So use Y tiling, since it makes better use of the cache.
347 */
348 return I915_TILING_Y;
349 }
350
351 GLenum base_format = _mesa_get_format_base_format(format);
352 if (intel->gen >= 4 &&
353 (base_format == GL_DEPTH_COMPONENT ||
354 base_format == GL_DEPTH_STENCIL_EXT))
355 return I915_TILING_Y;
356
357 int minimum_pitch = mt->total_width * mt->cpp;
358
359 /* If the width is much smaller than a tile, don't bother tiling. */
360 if (minimum_pitch < 64)
361 return I915_TILING_NONE;
362
363 if (ALIGN(minimum_pitch, 512) >= 32768) {
364 perf_debug("%dx%d miptree too large to blit, falling back to untiled",
365 mt->total_width, mt->total_height);
366 return I915_TILING_NONE;
367 }
368
369 /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
370 if (intel->gen < 6)
371 return I915_TILING_X;
372
373 return I915_TILING_Y | I915_TILING_X;
374 }
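
/* Note that the combined I915_TILING_Y | I915_TILING_X return value means
 * "prefer Y tiling, but fall back to X if the buffer would be too large to
 * map through the GTT"; intel_miptree_create() below decodes it that way.
 */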
375
376 struct intel_mipmap_tree *
377 intel_miptree_create(struct intel_context *intel,
378 GLenum target,
379 gl_format format,
380 GLuint first_level,
381 GLuint last_level,
382 GLuint width0,
383 GLuint height0,
384 GLuint depth0,
385 bool expect_accelerated_upload,
386 GLuint num_samples,
387 enum intel_miptree_tiling_mode requested_tiling)
388 {
389 struct intel_mipmap_tree *mt;
390 gl_format tex_format = format;
391 gl_format etc_format = MESA_FORMAT_NONE;
392 GLuint total_width, total_height;
393
394 if (!intel->is_baytrail) {
395 switch (format) {
396 case MESA_FORMAT_ETC1_RGB8:
397 format = MESA_FORMAT_RGBX8888_REV;
398 break;
399 case MESA_FORMAT_ETC2_RGB8:
400 format = MESA_FORMAT_RGBX8888_REV;
401 break;
402 case MESA_FORMAT_ETC2_SRGB8:
403 case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
404 case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
405 format = MESA_FORMAT_SARGB8;
406 break;
407 case MESA_FORMAT_ETC2_RGBA8_EAC:
408 case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
409 format = MESA_FORMAT_RGBA8888_REV;
410 break;
411 case MESA_FORMAT_ETC2_R11_EAC:
412 format = MESA_FORMAT_R16;
413 break;
414 case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
415 format = MESA_FORMAT_SIGNED_R16;
416 break;
417 case MESA_FORMAT_ETC2_RG11_EAC:
418 format = MESA_FORMAT_GR1616;
419 break;
420 case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
421 format = MESA_FORMAT_SIGNED_GR1616;
422 break;
423 default:
424 /* Non ETC1 / ETC2 format */
425 break;
426 }
427 }
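
/* The original ETC format is remembered in etc_format just below;
 * intel_miptree_map_etc() and intel_miptree_unmap_etc() use it to unpack
 * application data into the uncompressed storage chosen above at map time.
 */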
428
429 etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
430
431 mt = intel_miptree_create_layout(intel, target, format,
432 first_level, last_level, width0,
433 height0, depth0,
434 false, num_samples);
435 /*
436 * pitch == 0 || height == 0 indicates the null texture
437 */
438 if (!mt || !mt->total_width || !mt->total_height) {
439 intel_miptree_release(&mt);
440 return NULL;
441 }
442
443 total_width = mt->total_width;
444 total_height = mt->total_height;
445
446 if (format == MESA_FORMAT_S8) {
447 /* Align to size of W tile, 64x64. */
448 total_width = ALIGN(total_width, 64);
449 total_height = ALIGN(total_height, 64);
450 }
451
452 uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
453 num_samples, requested_tiling,
454 mt);
455 bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
456
457 mt->etc_format = etc_format;
458 mt->region = intel_region_alloc(intel->intelScreen,
459 y_or_x ? I915_TILING_Y : tiling,
460 mt->cpp,
461 total_width,
462 total_height,
463 expect_accelerated_upload);
464
465 /* If the region is too large to fit in the aperture, we need to use the
466 * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
467 * so we need to fall back to X.
468 */
469 if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
470 perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
471 mt->total_width, mt->total_height);
472 intel_region_release(&mt->region);
473
474 mt->region = intel_region_alloc(intel->intelScreen,
475 I915_TILING_X,
476 mt->cpp,
477 total_width,
478 total_height,
479 expect_accelerated_upload);
480 }
481
482 mt->offset = 0;
483
484 if (!mt->region) {
485 intel_miptree_release(&mt);
486 return NULL;
487 }
488
489 return mt;
490 }
491
492 struct intel_mipmap_tree *
493 intel_miptree_create_for_bo(struct intel_context *intel,
494 drm_intel_bo *bo,
495 gl_format format,
496 uint32_t offset,
497 uint32_t width,
498 uint32_t height,
499 int pitch,
500 uint32_t tiling)
501 {
502 struct intel_mipmap_tree *mt;
503
504 struct intel_region *region = calloc(1, sizeof(*region));
505 if (!region)
506 return NULL;
507
508 /* Nothing will be able to use this miptree with the BO if the offset isn't
509 * aligned.
510 */
511 if (tiling != I915_TILING_NONE)
512 assert(offset % 4096 == 0);
513
514 /* miptrees can't handle negative pitch. If you need flipping of images,
515 * that's outside of the scope of the mt.
516 */
517 assert(pitch >= 0);
518
519 mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
520 0, 0,
521 width, height, 1,
522 true, 0 /* num_samples */);
523 if (!mt)
524 return mt;
525
526 region->cpp = mt->cpp;
527 region->width = width;
528 region->height = height;
529 region->pitch = pitch;
530 region->refcount = 1;
531 drm_intel_bo_reference(bo);
532 region->bo = bo;
533 region->tiling = tiling;
534
535 mt->region = region;
536 mt->offset = offset;
537
538 return mt;
539 }
540
541
542 /**
543 * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
544 *
545 * For a multisample DRI2 buffer, this wraps the given region with
546 * a singlesample miptree, then creates a multisample miptree into which the
547 * singlesample miptree is embedded as a child.
548 */
549 struct intel_mipmap_tree*
550 intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
551 unsigned dri_attachment,
552 gl_format format,
553 uint32_t num_samples,
554 struct intel_region *region)
555 {
556 struct intel_mipmap_tree *singlesample_mt = NULL;
557 struct intel_mipmap_tree *multisample_mt = NULL;
558
559 /* Only the front and back buffers, which are color buffers, are shared
560 * through DRI2.
561 */
562 assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
563 dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
564 dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
565 assert(_mesa_get_format_base_format(format) == GL_RGB ||
566 _mesa_get_format_base_format(format) == GL_RGBA);
567
568 singlesample_mt = intel_miptree_create_for_bo(intel,
569 region->bo,
570 format,
571 0,
572 region->width,
573 region->height,
574 region->pitch,
575 region->tiling);
576 if (!singlesample_mt)
577 return NULL;
578 singlesample_mt->region->name = region->name;
579
580 if (num_samples == 0)
581 return singlesample_mt;
582
583 multisample_mt = intel_miptree_create_for_renderbuffer(intel,
584 format,
585 region->width,
586 region->height,
587 num_samples);
588 if (!multisample_mt) {
589 intel_miptree_release(&singlesample_mt);
590 return NULL;
591 }
592
593 multisample_mt->singlesample_mt = singlesample_mt;
594 multisample_mt->need_downsample = false;
595
596 if (intel->is_front_buffer_rendering &&
597 (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
598 dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
599 intel_miptree_upsample(intel, multisample_mt);
600 }
601
602 return multisample_mt;
603 }
604
605 struct intel_mipmap_tree*
606 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
607 gl_format format,
608 uint32_t width,
609 uint32_t height,
610 uint32_t num_samples)
611 {
612 struct intel_mipmap_tree *mt;
613 uint32_t depth = 1;
614 bool ok;
615
616 mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
617 width, height, depth, true, num_samples,
618 INTEL_MIPTREE_TILING_ANY);
619 if (!mt)
620 goto fail;
621
622 if (intel->vtbl.is_hiz_depth_format(intel, format)) {
623 ok = intel_miptree_alloc_hiz(intel, mt);
624 if (!ok)
625 goto fail;
626 }
627
628 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
629 ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
630 if (!ok)
631 goto fail;
632 }
633
634 return mt;
635
636 fail:
637 intel_miptree_release(&mt);
638 return NULL;
639 }
640
641 void
642 intel_miptree_reference(struct intel_mipmap_tree **dst,
643 struct intel_mipmap_tree *src)
644 {
645 if (*dst == src)
646 return;
647
648 intel_miptree_release(dst);
649
650 if (src) {
651 src->refcount++;
652 DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
653 }
654
655 *dst = src;
656 }
657
658
659 void
660 intel_miptree_release(struct intel_mipmap_tree **mt)
661 {
662 if (!*mt)
663 return;
664
665 DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
666 if (--(*mt)->refcount <= 0) {
667 GLuint i;
668
669 DBG("%s deleting %p\n", __FUNCTION__, *mt);
670
671 intel_region_release(&((*mt)->region));
672 intel_miptree_release(&(*mt)->stencil_mt);
673 intel_miptree_release(&(*mt)->hiz_mt);
674 #ifndef I915
675 intel_miptree_release(&(*mt)->mcs_mt);
676 #endif
677 intel_miptree_release(&(*mt)->singlesample_mt);
678 intel_resolve_map_clear(&(*mt)->hiz_map);
679
680 for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
681 free((*mt)->level[i].slice);
682 }
683
684 free(*mt);
685 }
686 *mt = NULL;
687 }
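
/* Usage sketch for the refcounting pair above (illustrative only, hence not
 * compiled; the helper name is hypothetical). intel_miptree_reference()
 * releases whatever *dst already points at, so a NULL-initialized pointer is
 * all a new holder needs.
 */
#if 0
static void
miptree_refcount_example(struct intel_mipmap_tree *src)
{
   struct intel_mipmap_tree *dst = NULL;

   intel_miptree_reference(&dst, src);   /* bumps src->refcount */
   /* ... use dst ... */
   intel_miptree_release(&dst);          /* drops the reference; dst == NULL */
}
#endif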
688
689 void
690 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
691 int *width, int *height, int *depth)
692 {
693 switch (image->TexObject->Target) {
694 case GL_TEXTURE_1D_ARRAY:
695 *width = image->Width;
696 *height = 1;
697 *depth = image->Height;
698 break;
699 default:
700 *width = image->Width;
701 *height = image->Height;
702 *depth = image->Depth;
703 break;
704 }
705 }
706
707 /**
708 * Can the image be pulled into a unified mipmap tree? This mirrors
709 * the completeness test in a lot of ways.
710 *
711 * Not sure whether I want to pass gl_texture_image here.
712 */
713 bool
714 intel_miptree_match_image(struct intel_mipmap_tree *mt,
715 struct gl_texture_image *image)
716 {
717 struct intel_texture_image *intelImage = intel_texture_image(image);
718 GLuint level = intelImage->base.Base.Level;
719 int width, height, depth;
720
721 /* glTexImage* choose the texture object based on the target passed in, and
722 * objects can't change targets over their lifetimes, so this should be
723 * true.
724 */
725 assert(target_to_target(image->TexObject->Target) == mt->target);
726
727 gl_format mt_format = mt->format;
728 if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
729 mt_format = MESA_FORMAT_S8_Z24;
730 if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
731 mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
732 if (mt->etc_format != MESA_FORMAT_NONE)
733 mt_format = mt->etc_format;
734
735 if (image->TexFormat != mt_format)
736 return false;
737
738 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
739
740 if (mt->target == GL_TEXTURE_CUBE_MAP)
741 depth = 6;
742
743 /* Test image dimensions against the base level image adjusted for
744 * minification. This will also catch images not present in the
745 * tree, changed targets, etc.
746 */
747 if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
748 mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
749 /* nonzero level here is always bogus */
750 assert(level == 0);
751
752 if (width != mt->logical_width0 ||
753 height != mt->logical_height0 ||
754 depth != mt->logical_depth0) {
755 return false;
756 }
757 }
758 else {
759 /* all normal textures, renderbuffers, etc */
760 if (width != mt->level[level].width ||
761 height != mt->level[level].height ||
762 depth != mt->level[level].depth) {
763 return false;
764 }
765 }
766
767 if (image->NumSamples != mt->num_samples)
768 return false;
769
770 return true;
771 }
772
773
774 void
775 intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
776 GLuint level,
777 GLuint x, GLuint y,
778 GLuint w, GLuint h, GLuint d)
779 {
780 mt->level[level].width = w;
781 mt->level[level].height = h;
782 mt->level[level].depth = d;
783 mt->level[level].level_x = x;
784 mt->level[level].level_y = y;
785
786 DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
787 level, w, h, d, x, y);
788
789 assert(mt->level[level].slice == NULL);
790
791 mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
792 mt->level[level].slice[0].x_offset = mt->level[level].level_x;
793 mt->level[level].slice[0].y_offset = mt->level[level].level_y;
794 }
795
796
797 void
798 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
799 GLuint level, GLuint img,
800 GLuint x, GLuint y)
801 {
802 if (img == 0 && level == 0)
803 assert(x == 0 && y == 0);
804
805 assert(img < mt->level[level].depth);
806
807 mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
808 mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
809
810 DBG("%s level %d img %d pos %d,%d\n",
811 __FUNCTION__, level, img,
812 mt->level[level].slice[img].x_offset,
813 mt->level[level].slice[img].y_offset);
814 }
815
816 void
817 intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
818 GLuint level, GLuint slice,
819 GLuint *x, GLuint *y)
820 {
821 assert(slice < mt->level[level].depth);
822
823 *x = mt->level[level].slice[slice].x_offset;
824 *y = mt->level[level].slice[slice].y_offset;
825 }
826
827 /**
828 * Rendering with tiled buffers requires that the base address of the buffer
829 * be aligned to a page boundary. For renderbuffers, and sometimes with
830 * textures, we may want the surface to point at a texture image level that
831 * isn't at a page boundary.
832 *
833 * This function returns an appropriately-aligned base offset
834 * according to the tiling restrictions, plus any required x/y offset
835 * from there.
836 */
837 uint32_t
838 intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
839 GLuint level, GLuint slice,
840 uint32_t *tile_x,
841 uint32_t *tile_y)
842 {
843 struct intel_region *region = mt->region;
844 uint32_t x, y;
845 uint32_t mask_x, mask_y;
846
847 intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
848 intel_miptree_get_image_offset(mt, level, slice, &x, &y);
849
850 *tile_x = x & mask_x;
851 *tile_y = y & mask_y;
852
853 return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
854 false);
855 }
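
/* Worked example (illustrative; assumes an X-tiled region with cpp = 4, for
 * which intel_region_get_tile_masks() yields mask_x = 127 and mask_y = 7,
 * i.e. a 512-byte x 8-row tile): an image at (200, 30) splits into the
 * tile-aligned base at (128, 24) plus intra-tile offsets *tile_x = 72 and
 * *tile_y = 6.
 */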
856
857 static void
858 intel_miptree_copy_slice_sw(struct intel_context *intel,
859 struct intel_mipmap_tree *dst_mt,
860 struct intel_mipmap_tree *src_mt,
861 int level,
862 int slice,
863 int width,
864 int height)
865 {
866 void *src, *dst;
867 int src_stride, dst_stride;
868 int cpp = dst_mt->cpp;
869
870 intel_miptree_map(intel, src_mt,
871 level, slice,
872 0, 0,
873 width, height,
874 GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
875 &src, &src_stride);
876
877 intel_miptree_map(intel, dst_mt,
878 level, slice,
879 0, 0,
880 width, height,
881 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
882 BRW_MAP_DIRECT_BIT,
883 &dst, &dst_stride);
884
885 DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
886 _mesa_get_format_name(src_mt->format),
887 src_mt, src, src_stride,
888 _mesa_get_format_name(dst_mt->format),
889 dst_mt, dst, dst_stride,
890 width, height);
891
892 int row_size = cpp * width;
893 if (src_stride == row_size &&
894 dst_stride == row_size) {
895 memcpy(dst, src, row_size * height);
896 } else {
897 for (int i = 0; i < height; i++) {
898 memcpy(dst, src, row_size);
899 dst += dst_stride;
900 src += src_stride;
901 }
902 }
903
904 intel_miptree_unmap(intel, dst_mt, level, slice);
905 intel_miptree_unmap(intel, src_mt, level, slice);
906
907 /* Don't forget to copy the stencil data over, too. We could have skipped
908 * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
909 * shuffling the two data sources in/out of temporary storage instead of
910 * the direct mapping we get this way.
911 */
912 if (dst_mt->stencil_mt) {
913 assert(src_mt->stencil_mt);
914 intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
915 level, slice, width, height);
916 }
917 }
918
919 static void
920 intel_miptree_copy_slice(struct intel_context *intel,
921 struct intel_mipmap_tree *dst_mt,
922 struct intel_mipmap_tree *src_mt,
923 int level,
924 int face,
925 int depth)
926
927 {
928 gl_format format = src_mt->format;
929 uint32_t width = src_mt->level[level].width;
930 uint32_t height = src_mt->level[level].height;
931 int slice;
932
933 if (face > 0)
934 slice = face;
935 else
936 slice = depth;
937
938 assert(depth < src_mt->level[level].depth);
939 assert(src_mt->format == dst_mt->format);
940
941 if (dst_mt->compressed) {
942 height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
943 width = ALIGN(width, dst_mt->align_w);
944 }
945
946 /* If it's a packed depth/stencil buffer with separate stencil, the blit
947 * below won't apply since we can't do the depth's Y tiling or the
948 * stencil's W tiling in the blitter.
949 */
950 if (src_mt->stencil_mt) {
951 intel_miptree_copy_slice_sw(intel,
952 dst_mt, src_mt,
953 level, slice,
954 width, height);
955 return;
956 }
957
958 uint32_t dst_x, dst_y, src_x, src_y;
959 intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
960 intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
961
962 DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
963 _mesa_get_format_name(src_mt->format),
964 src_mt, src_x, src_y, src_mt->region->pitch,
965 _mesa_get_format_name(dst_mt->format),
966 dst_mt, dst_x, dst_y, dst_mt->region->pitch,
967 width, height);
968
969 if (!intel_miptree_blit(intel,
970 src_mt, level, slice, 0, 0, false,
971 dst_mt, level, slice, 0, 0, false,
972 width, height, GL_COPY)) {
973 perf_debug("miptree validate blit for %s failed\n",
974 _mesa_get_format_name(format));
975
976 intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
977 width, height);
978 }
979 }
980
981 /**
982 * Copies the image's current data to the given miptree, and associates that
983 * miptree with the image.
984 *
985 * If \c invalidate is true, then the actual image data does not need to be
986 * copied, but the image still needs to be associated with the new miptree (this
987 * is set to true if we're about to clear the image).
988 */
989 void
990 intel_miptree_copy_teximage(struct intel_context *intel,
991 struct intel_texture_image *intelImage,
992 struct intel_mipmap_tree *dst_mt,
993 bool invalidate)
994 {
995 struct intel_mipmap_tree *src_mt = intelImage->mt;
996 struct intel_texture_object *intel_obj =
997 intel_texture_object(intelImage->base.Base.TexObject);
998 int level = intelImage->base.Base.Level;
999 int face = intelImage->base.Base.Face;
1000 GLuint depth = intelImage->base.Base.Depth;
1001
1002 if (!invalidate) {
1003 for (int slice = 0; slice < depth; slice++) {
1004 intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
1005 }
1006 }
1007
1008 intel_miptree_reference(&intelImage->mt, dst_mt);
1009 intel_obj->needs_validate = true;
1010 }
1011
1012 bool
1013 intel_miptree_alloc_mcs(struct intel_context *intel,
1014 struct intel_mipmap_tree *mt,
1015 GLuint num_samples)
1016 {
1017 assert(intel->gen >= 7); /* MCS only used on Gen7+ */
1018 #ifdef I915
1019 return false;
1020 #else
1021 assert(mt->mcs_mt == NULL);
1022
1023 /* Choose the correct format for the MCS buffer. All that really matters
1024 * is that we allocate the right buffer size, since we'll always be
1025 * accessing this miptree using MCS-specific hardware mechanisms, which
1026 * infer the correct format based on num_samples.
1027 */
1028 gl_format format;
1029 switch (num_samples) {
1030 case 4:
1031 /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
1032 * each sample).
1033 */
1034 format = MESA_FORMAT_R8;
1035 break;
1036 case 8:
1037 /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
1038 * for each sample, plus 8 padding bits).
1039 */
1040 format = MESA_FORMAT_R_UINT32;
1041 break;
1042 default:
1043 assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
1044 return false;
1045 }
1046
1047 /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
1048 *
1049 * "The MCS surface must be stored as Tile Y."
1050 */
1051 mt->mcs_mt = intel_miptree_create(intel,
1052 mt->target,
1053 format,
1054 mt->first_level,
1055 mt->last_level,
1056 mt->logical_width0,
1057 mt->logical_height0,
1058 mt->logical_depth0,
1059 true,
1060 0 /* num_samples */,
1061 INTEL_MIPTREE_TILING_Y);
1062
1063 /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
1064 *
1065 * When MCS buffer is enabled and bound to MSRT, it is required that it
1066 * is cleared prior to any rendering.
1067 *
1068 * Since we don't use the MCS buffer for any purpose other than rendering,
1069 * it makes sense to just clear it immediately upon allocation.
1070 *
1071 * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
1072 */
1073 void *data = intel_miptree_map_raw(intel, mt->mcs_mt);
1074 memset(data, 0xff, mt->mcs_mt->region->bo->size);
1075 intel_miptree_unmap_raw(intel, mt->mcs_mt);
1076
1077 return mt->mcs_mt;
1078 #endif
1079 }
1080
1081 /**
1082 * Helper for intel_miptree_alloc_hiz() that sets
1083 * \c mt->level[level].slice[layer].has_hiz. Return true if and only if
1084 * \c has_hiz was set.
1085 */
1086 static bool
1087 intel_miptree_slice_enable_hiz(struct intel_context *intel,
1088 struct intel_mipmap_tree *mt,
1089 uint32_t level,
1090 uint32_t layer)
1091 {
1092 assert(mt->hiz_mt);
1093
1094 if (intel->is_haswell) {
1095 /* Disable HiZ for some slices to work around a hardware bug.
1096 *
1097 * Haswell hardware fails to respect
1098 * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y during HiZ
1099 * ambiguate operations. The failure is inconsistent and affected by
1100 * other GPU contexts. Running a heavy GPU workload in a separate
1101 * process causes the failure rate to drop to nearly 0.
1102 *
1103 * To workaround the bug, we enable HiZ only when we can guarantee that
1104 * the Depth Coordinate Offset fields will be set to 0. The function
1105 * brw_get_depthstencil_tile_masks() is used to calculate the fields,
1106 * and the function is sometimes called in such a way that the presence
1107 * of an attached stencil buffer changes the function's return value.
1108 *
1109 * The largest tile size considered by brw_get_depthstencil_tile_masks()
1110 * is that of the stencil buffer. Therefore, if this hiz slice's
1111 * corresponding depth slice has an offset that is aligned to the
1112 * stencil buffer tile size, 64x64 pixels, then
1113 * 3DSTATE_DEPTH_BUFFER.Depth_Coordinate_Offset_X/Y is set to 0.
1114 */
1115 uint32_t depth_x_offset = mt->level[level].slice[layer].x_offset;
1116 uint32_t depth_y_offset = mt->level[level].slice[layer].y_offset;
1117 if ((depth_x_offset & 63) || (depth_y_offset & 63)) {
1118 return false;
1119 }
1120 }
1121
1122 mt->level[level].slice[layer].has_hiz = true;
1123 return true;
1124 }
1125
1126
1127
1128 bool
1129 intel_miptree_alloc_hiz(struct intel_context *intel,
1130 struct intel_mipmap_tree *mt)
1131 {
1132 assert(mt->hiz_mt == NULL);
1133 mt->hiz_mt = intel_miptree_create(intel,
1134 mt->target,
1135 mt->format,
1136 mt->first_level,
1137 mt->last_level,
1138 mt->logical_width0,
1139 mt->logical_height0,
1140 mt->logical_depth0,
1141 true,
1142 mt->num_samples,
1143 INTEL_MIPTREE_TILING_ANY);
1144
1145 if (!mt->hiz_mt)
1146 return false;
1147
1148 /* Mark that all slices need a HiZ resolve. */
1149 struct intel_resolve_map *head = &mt->hiz_map;
1150 for (int level = mt->first_level; level <= mt->last_level; ++level) {
1151 for (int layer = 0; layer < mt->level[level].depth; ++layer) {
1152 if (!intel_miptree_slice_enable_hiz(intel, mt, level, layer))
1153 continue;
1154
1155 head->next = malloc(sizeof(*head->next));
1156 head->next->prev = head;
1157 head->next->next = NULL;
1158 head = head->next;
1159
1160 head->level = level;
1161 head->layer = layer;
1162 head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
1163 }
1164 }
1165
1166 return true;
1167 }
1168
1169 /**
1170 * Does the miptree slice have hiz enabled?
1171 */
1172 bool
1173 intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
1174 uint32_t level,
1175 uint32_t layer)
1176 {
1177 intel_miptree_check_level_layer(mt, level, layer);
1178 return mt->level[level].slice[layer].has_hiz;
1179 }
1180
1181 void
1182 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
1183 uint32_t level,
1184 uint32_t layer)
1185 {
1186 if (!intel_miptree_slice_has_hiz(mt, level, layer))
1187 return;
1188
1189 intel_resolve_map_set(&mt->hiz_map,
1190 level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
1191 }
1192
1193
1194 void
1195 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
1196 uint32_t level,
1197 uint32_t layer)
1198 {
1199 if (!intel_miptree_slice_has_hiz(mt, level, layer))
1200 return;
1201
1202 intel_resolve_map_set(&mt->hiz_map,
1203 level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
1204 }
1205
1206 static bool
1207 intel_miptree_slice_resolve(struct intel_context *intel,
1208 struct intel_mipmap_tree *mt,
1209 uint32_t level,
1210 uint32_t layer,
1211 enum gen6_hiz_op need)
1212 {
1213 intel_miptree_check_level_layer(mt, level, layer);
1214
1215 struct intel_resolve_map *item =
1216 intel_resolve_map_get(&mt->hiz_map, level, layer);
1217
1218 if (!item || item->need != need)
1219 return false;
1220
1221 intel_hiz_exec(intel, mt, level, layer, need);
1222 intel_resolve_map_remove(item);
1223 return true;
1224 }
1225
1226 bool
1227 intel_miptree_slice_resolve_hiz(struct intel_context *intel,
1228 struct intel_mipmap_tree *mt,
1229 uint32_t level,
1230 uint32_t layer)
1231 {
1232 return intel_miptree_slice_resolve(intel, mt, level, layer,
1233 GEN6_HIZ_OP_HIZ_RESOLVE);
1234 }
1235
1236 bool
1237 intel_miptree_slice_resolve_depth(struct intel_context *intel,
1238 struct intel_mipmap_tree *mt,
1239 uint32_t level,
1240 uint32_t layer)
1241 {
1242 return intel_miptree_slice_resolve(intel, mt, level, layer,
1243 GEN6_HIZ_OP_DEPTH_RESOLVE);
1244 }
1245
1246 static bool
1247 intel_miptree_all_slices_resolve(struct intel_context *intel,
1248 struct intel_mipmap_tree *mt,
1249 enum gen6_hiz_op need)
1250 {
1251 bool did_resolve = false;
1252 struct intel_resolve_map *i, *next;
1253
1254 for (i = mt->hiz_map.next; i; i = next) {
1255 next = i->next;
1256 if (i->need != need)
1257 continue;
1258
1259 intel_hiz_exec(intel, mt, i->level, i->layer, need);
1260 intel_resolve_map_remove(i);
1261 did_resolve = true;
1262 }
1263
1264 return did_resolve;
1265 }
1266
1267 bool
1268 intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
1269 struct intel_mipmap_tree *mt)
1270 {
1271 return intel_miptree_all_slices_resolve(intel, mt,
1272 GEN6_HIZ_OP_HIZ_RESOLVE);
1273 }
1274
1275 bool
1276 intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
1277 struct intel_mipmap_tree *mt)
1278 {
1279 return intel_miptree_all_slices_resolve(intel, mt,
1280 GEN6_HIZ_OP_DEPTH_RESOLVE);
1281 }
1282
1283 /**
1284 * \brief Get pointer offset into stencil buffer.
1285 *
1286 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
1287 * must decode the tile's layout in software.
1288 *
1289 * See
1290 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
1291 * Format.
1292 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
1293 *
1294 * Even though the returned offset is always positive, the return type is
1295 * signed due to
1296 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
1297 * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
1298 */
1299 static intptr_t
1300 intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
1301 {
1302 uint32_t tile_size = 4096;
1303 uint32_t tile_width = 64;
1304 uint32_t tile_height = 64;
1305 uint32_t row_size = 64 * stride;
1306
1307 uint32_t tile_x = x / tile_width;
1308 uint32_t tile_y = y / tile_height;
1309
1310 /* The byte's address relative to the tile's base address. */
1311 uint32_t byte_x = x % tile_width;
1312 uint32_t byte_y = y % tile_height;
1313
1314 uintptr_t u = tile_y * row_size
1315 + tile_x * tile_size
1316 + 512 * (byte_x / 8)
1317 + 64 * (byte_y / 8)
1318 + 32 * ((byte_y / 4) % 2)
1319 + 16 * ((byte_x / 4) % 2)
1320 + 8 * ((byte_y / 2) % 2)
1321 + 4 * ((byte_x / 2) % 2)
1322 + 2 * (byte_y % 2)
1323 + 1 * (byte_x % 2);
1324
1325 if (swizzled) {
1326 /* adjust for bit6 swizzling */
1327 if (((byte_x / 8) % 2) == 1) {
1328 if (((byte_y / 8) % 2) == 0) {
1329 u += 64;
1330 } else {
1331 u -= 64;
1332 }
1333 }
1334 }
1335
1336 return u;
1337 }
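
/* A minimal self-check of the swizzle above (illustrative, not compiled):
 * with a 128-byte pitch and no bit6 swizzling, (x=70, y=10) lands in tile
 * (1,0), and byte (6,10) within the tile contributes 64 + 16 + 8 + 4 = 92,
 * giving a total offset of 4096 + 92 = 4188.
 */
#if 0
   assert(intel_offset_S8(128, 70, 10, false) == 4188);
#endif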
1338
1339 static void
1340 intel_miptree_updownsample(struct intel_context *intel,
1341 struct intel_mipmap_tree *src,
1342 struct intel_mipmap_tree *dst,
1343 unsigned width,
1344 unsigned height)
1345 {
1346 #ifndef I915
1347 int src_x0 = 0;
1348 int src_y0 = 0;
1349 int dst_x0 = 0;
1350 int dst_y0 = 0;
1351
1352 brw_blorp_blit_miptrees(intel,
1353 src, 0 /* level */, 0 /* layer */,
1354 dst, 0 /* level */, 0 /* layer */,
1355 src_x0, src_y0,
1356 width, height,
1357 dst_x0, dst_y0,
1358 width, height,
1359 false, false /*mirror x, y*/);
1360
1361 if (src->stencil_mt) {
1362 brw_blorp_blit_miptrees(intel,
1363 src->stencil_mt, 0 /* level */, 0 /* layer */,
1364 dst->stencil_mt, 0 /* level */, 0 /* layer */,
1365 src_x0, src_y0,
1366 width, height,
1367 dst_x0, dst_y0,
1368 width, height,
1369 false, false /*mirror x, y*/);
1370 }
1371 #endif /* I915 */
1372 }
1373
1374 static void
1375 assert_is_flat(struct intel_mipmap_tree *mt)
1376 {
1377 assert(mt->target == GL_TEXTURE_2D);
1378 assert(mt->first_level == 0);
1379 assert(mt->last_level == 0);
1380 }
1381
1382 /**
1383 * \brief Downsample from mt to mt->singlesample_mt.
1384 *
1385 * If the miptree needs no downsample, then skip.
1386 */
1387 void
1388 intel_miptree_downsample(struct intel_context *intel,
1389 struct intel_mipmap_tree *mt)
1390 {
1391 /* Only flat, renderbuffer-like miptrees are supported. */
1392 assert_is_flat(mt);
1393
1394 if (!mt->need_downsample)
1395 return;
1396 intel_miptree_updownsample(intel,
1397 mt, mt->singlesample_mt,
1398 mt->logical_width0,
1399 mt->logical_height0);
1400 mt->need_downsample = false;
1401 }
1402
1403 /**
1404 * \brief Upsample from mt->singlesample_mt to mt.
1405 *
1406 * The upsample is done unconditionally.
1407 */
1408 void
1409 intel_miptree_upsample(struct intel_context *intel,
1410 struct intel_mipmap_tree *mt)
1411 {
1412 /* Only flat, renderbuffer-like miptrees are supported. */
1413 assert_is_flat(mt);
1414 assert(!mt->need_downsample);
1415
1416 intel_miptree_updownsample(intel,
1417 mt->singlesample_mt, mt,
1418 mt->logical_width0,
1419 mt->logical_height0);
1420 }
1421
1422 void *
1423 intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
1424 {
1425 drm_intel_bo *bo = mt->region->bo;
1426
1427 if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
1428 if (drm_intel_bo_busy(bo)) {
1429 perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
1430 }
1431 }
1432
1433 intel_flush(&intel->ctx);
1434
1435 if (mt->region->tiling != I915_TILING_NONE)
1436 drm_intel_gem_bo_map_gtt(bo);
1437 else
1438 drm_intel_bo_map(bo, true);
1439
1440 return bo->virtual;
1441 }
1442
1443 void
1444 intel_miptree_unmap_raw(struct intel_context *intel,
1445 struct intel_mipmap_tree *mt)
1446 {
1447 drm_intel_bo_unmap(mt->region->bo);
1448 }
1449
1450 static void
1451 intel_miptree_map_gtt(struct intel_context *intel,
1452 struct intel_mipmap_tree *mt,
1453 struct intel_miptree_map *map,
1454 unsigned int level, unsigned int slice)
1455 {
1456 unsigned int bw, bh;
1457 void *base;
1458 unsigned int image_x, image_y;
1459 int x = map->x;
1460 int y = map->y;
1461
1462 /* For compressed formats, the stride is the number of bytes per
1463 * row of blocks. intel_miptree_get_image_offset() already does
1464 * the divide.
1465 */
1466 _mesa_get_format_block_size(mt->format, &bw, &bh);
1467 assert(y % bh == 0);
1468 y /= bh;
1469
1470 base = intel_miptree_map_raw(intel, mt) + mt->offset;
1471
1472 if (base == NULL)
1473 map->ptr = NULL;
1474 else {
1475 /* Note that in the case of cube maps, the caller must have passed the
1476 * slice number referencing the face.
1477 */
1478 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1479 x += image_x;
1480 y += image_y;
1481
1482 map->stride = mt->region->pitch;
1483 map->ptr = base + y * map->stride + x * mt->cpp;
1484 }
1485
1486 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
1487 map->x, map->y, map->w, map->h,
1488 mt, _mesa_get_format_name(mt->format),
1489 x, y, map->ptr, map->stride);
1490 }
1491
1492 static void
1493 intel_miptree_unmap_gtt(struct intel_context *intel,
1494 struct intel_mipmap_tree *mt,
1495 struct intel_miptree_map *map,
1496 unsigned int level,
1497 unsigned int slice)
1498 {
1499 intel_miptree_unmap_raw(intel, mt);
1500 }
1501
1502 static void
1503 intel_miptree_map_blit(struct intel_context *intel,
1504 struct intel_mipmap_tree *mt,
1505 struct intel_miptree_map *map,
1506 unsigned int level, unsigned int slice)
1507 {
1508 map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
1509 0, 0,
1510 map->w, map->h, 1,
1511 false, 0,
1512 INTEL_MIPTREE_TILING_NONE);
1513 if (!map->mt) {
1514 fprintf(stderr, "Failed to allocate blit temporary\n");
1515 goto fail;
1516 }
1517 map->stride = map->mt->region->pitch;
1518
1519 if (!intel_miptree_blit(intel,
1520 mt, level, slice,
1521 map->x, map->y, false,
1522 map->mt, 0, 0,
1523 0, 0, false,
1524 map->w, map->h, GL_COPY)) {
1525 fprintf(stderr, "Failed to blit\n");
1526 goto fail;
1527 }
1528
1529 intel_batchbuffer_flush(intel);
1530 map->ptr = intel_miptree_map_raw(intel, map->mt);
1531
1532 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
1533 map->x, map->y, map->w, map->h,
1534 mt, _mesa_get_format_name(mt->format),
1535 level, slice, map->ptr, map->stride);
1536
1537 return;
1538
1539 fail:
1540 intel_miptree_release(&map->mt);
1541 map->ptr = NULL;
1542 map->stride = 0;
1543 }
1544
1545 static void
1546 intel_miptree_unmap_blit(struct intel_context *intel,
1547 struct intel_mipmap_tree *mt,
1548 struct intel_miptree_map *map,
1549 unsigned int level,
1550 unsigned int slice)
1551 {
1552 struct gl_context *ctx = &intel->ctx;
1553
1554 intel_miptree_unmap_raw(intel, map->mt);
1555
1556 if (map->mode & GL_MAP_WRITE_BIT) {
1557 bool ok = intel_miptree_blit(intel,
1558 map->mt, 0, 0,
1559 0, 0, false,
1560 mt, level, slice,
1561 map->x, map->y, false,
1562 map->w, map->h, GL_COPY);
1563 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
1564 }
1565
1566 intel_miptree_release(&map->mt);
1567 }
1568
1569 static void
1570 intel_miptree_map_s8(struct intel_context *intel,
1571 struct intel_mipmap_tree *mt,
1572 struct intel_miptree_map *map,
1573 unsigned int level, unsigned int slice)
1574 {
1575 map->stride = map->w;
1576 map->buffer = map->ptr = malloc(map->stride * map->h);
1577 if (!map->buffer)
1578 return;
1579
1580 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
1581 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
1582 * invalidate is set, since we'll be writing the whole rectangle from our
1583 * temporary buffer back out.
1584 */
1585 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
1586 uint8_t *untiled_s8_map = map->ptr;
1587 uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
1588 unsigned int image_x, image_y;
1589
1590 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1591
1592 for (uint32_t y = 0; y < map->h; y++) {
1593 for (uint32_t x = 0; x < map->w; x++) {
1594 ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
1595 x + image_x + map->x,
1596 y + image_y + map->y,
1597 intel->has_swizzling);
1598 untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
1599 }
1600 }
1601
1602 intel_miptree_unmap_raw(intel, mt);
1603
1604 DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
1605 map->x, map->y, map->w, map->h,
1606 mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
1607 } else {
1608 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
1609 map->x, map->y, map->w, map->h,
1610 mt, map->ptr, map->stride);
1611 }
1612 }
1613
1614 static void
1615 intel_miptree_unmap_s8(struct intel_context *intel,
1616 struct intel_mipmap_tree *mt,
1617 struct intel_miptree_map *map,
1618 unsigned int level,
1619 unsigned int slice)
1620 {
1621 if (map->mode & GL_MAP_WRITE_BIT) {
1622 unsigned int image_x, image_y;
1623 uint8_t *untiled_s8_map = map->ptr;
1624 uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
1625
1626 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1627
1628 for (uint32_t y = 0; y < map->h; y++) {
1629 for (uint32_t x = 0; x < map->w; x++) {
1630 ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
1631 x + map->x,
1632 y + map->y,
1633 intel->has_swizzling);
1634 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
1635 }
1636 }
1637
1638 intel_miptree_unmap_raw(intel, mt);
1639 }
1640
1641 free(map->buffer);
1642 }
1643
1644 static void
1645 intel_miptree_map_etc(struct intel_context *intel,
1646 struct intel_mipmap_tree *mt,
1647 struct intel_miptree_map *map,
1648 unsigned int level,
1649 unsigned int slice)
1650 {
1651 assert(mt->etc_format != MESA_FORMAT_NONE);
1652 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
1653 assert(mt->format == MESA_FORMAT_RGBX8888_REV);
1654 }
1655
1656 assert(map->mode & GL_MAP_WRITE_BIT);
1657 assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
1658
1659 map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
1660 map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
1661 map->w, map->h, 1));
1662 map->ptr = map->buffer;
1663 }
1664
1665 static void
1666 intel_miptree_unmap_etc(struct intel_context *intel,
1667 struct intel_mipmap_tree *mt,
1668 struct intel_miptree_map *map,
1669 unsigned int level,
1670 unsigned int slice)
1671 {
1672 uint32_t image_x;
1673 uint32_t image_y;
1674 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
1675
1676 image_x += map->x;
1677 image_y += map->y;
1678
1679 uint8_t *dst = intel_miptree_map_raw(intel, mt)
1680 + image_y * mt->region->pitch
1681 + image_x * mt->region->cpp;
1682
1683 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
1684 _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
1685 map->ptr, map->stride,
1686 map->w, map->h);
1687 else
1688 _mesa_unpack_etc2_format(dst, mt->region->pitch,
1689 map->ptr, map->stride,
1690 map->w, map->h, mt->etc_format);
1691
1692 intel_miptree_unmap_raw(intel, mt);
1693 free(map->buffer);
1694 }
1695
1696 /**
1697 * Mapping function for packed depth/stencil miptrees backed by real separate
1698 * miptrees for depth and stencil.
1699 *
1700 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
1701 * separate from the depth buffer. Yet at the GL API level, we have to expose
1702 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
1703 * be able to map that memory for texture storage and glReadPixels-type
1704 * operations. We give Mesa core that access by mallocing a temporary and
1705 * copying the data between the actual backing store and the temporary.
1706 */
1707 static void
1708 intel_miptree_map_depthstencil(struct intel_context *intel,
1709 struct intel_mipmap_tree *mt,
1710 struct intel_miptree_map *map,
1711 unsigned int level, unsigned int slice)
1712 {
1713 struct intel_mipmap_tree *z_mt = mt;
1714 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
1715 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
1716 int packed_bpp = map_z32f_x24s8 ? 8 : 4;
1717
1718 map->stride = map->w * packed_bpp;
1719 map->buffer = map->ptr = malloc(map->stride * map->h);
1720 if (!map->buffer)
1721 return;
1722
1723 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
1724 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
1725 * invalidate is set, since we'll be writing the whole rectangle from our
1726 * temporary buffer back out.
1727 */
1728 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
1729 uint32_t *packed_map = map->ptr;
1730 uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
1731 uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
1732 unsigned int s_image_x, s_image_y;
1733 unsigned int z_image_x, z_image_y;
1734
1735 intel_miptree_get_image_offset(s_mt, level, slice,
1736 &s_image_x, &s_image_y);
1737 intel_miptree_get_image_offset(z_mt, level, slice,
1738 &z_image_x, &z_image_y);
1739
1740 for (uint32_t y = 0; y < map->h; y++) {
1741 for (uint32_t x = 0; x < map->w; x++) {
1742 int map_x = map->x + x, map_y = map->y + y;
1743 ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
1744 map_x + s_image_x,
1745 map_y + s_image_y,
1746 intel->has_swizzling);
1747 ptrdiff_t z_offset = ((map_y + z_image_y) *
1748 (z_mt->region->pitch / 4) +
1749 (map_x + z_image_x));
1750 uint8_t s = s_map[s_offset];
1751 uint32_t z = z_map[z_offset];
1752
1753 if (map_z32f_x24s8) {
1754 packed_map[(y * map->w + x) * 2 + 0] = z;
1755 packed_map[(y * map->w + x) * 2 + 1] = s;
1756 } else {
1757 packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
1758 }
1759 }
1760 }
1761
1762 intel_miptree_unmap_raw(intel, s_mt);
1763 intel_miptree_unmap_raw(intel, z_mt);
1764
1765 DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
1766 __FUNCTION__,
1767 map->x, map->y, map->w, map->h,
1768 z_mt, map->x + z_image_x, map->y + z_image_y,
1769 s_mt, map->x + s_image_x, map->y + s_image_y,
1770 map->ptr, map->stride);
1771 } else {
1772 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
1773 map->x, map->y, map->w, map->h,
1774 mt, map->ptr, map->stride);
1775 }
1776 }
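
/* For the packed S8_Z24 case above, a worked example (illustrative): stencil
 * 0x80 and 24-bit depth 0x123456 pack to 0x80123456, exactly the
 * MESA_FORMAT_S8_Z24 layout that core Mesa expects back from the mapping.
 */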
1777
1778 static void
1779 intel_miptree_unmap_depthstencil(struct intel_context *intel,
1780 struct intel_mipmap_tree *mt,
1781 struct intel_miptree_map *map,
1782 unsigned int level,
1783 unsigned int slice)
1784 {
1785 struct intel_mipmap_tree *z_mt = mt;
1786 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
1787 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
1788
1789 if (map->mode & GL_MAP_WRITE_BIT) {
1790 uint32_t *packed_map = map->ptr;
1791 uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
1792 uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
1793 unsigned int s_image_x, s_image_y;
1794 unsigned int z_image_x, z_image_y;
1795
1796 intel_miptree_get_image_offset(s_mt, level, slice,
1797 &s_image_x, &s_image_y);
1798 intel_miptree_get_image_offset(z_mt, level, slice,
1799 &z_image_x, &z_image_y);
1800
1801 for (uint32_t y = 0; y < map->h; y++) {
1802 for (uint32_t x = 0; x < map->w; x++) {
1803 ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
1804 x + s_image_x + map->x,
1805 y + s_image_y + map->y,
1806 intel->has_swizzling);
1807 ptrdiff_t z_offset = ((y + z_image_y) *
1808 (z_mt->region->pitch / 4) +
1809 (x + z_image_x));
1810
1811 if (map_z32f_x24s8) {
1812 z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
1813 s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
1814 } else {
1815 uint32_t packed = packed_map[y * map->w + x];
1816 s_map[s_offset] = packed >> 24;
1817 z_map[z_offset] = packed;
1818 }
1819 }
1820 }
1821
1822 intel_miptree_unmap_raw(intel, s_mt);
1823 intel_miptree_unmap_raw(intel, z_mt);
1824
1825 DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
1826 __FUNCTION__,
1827 map->x, map->y, map->w, map->h,
1828 z_mt, _mesa_get_format_name(z_mt->format),
1829 map->x + z_image_x, map->y + z_image_y,
1830 s_mt, map->x + s_image_x, map->y + s_image_y,
1831 map->ptr, map->stride);
1832 }
1833
1834 free(map->buffer);
1835 }
1836
1837 /**
1838 * Create and attach a map to the miptree at (level, slice). Return the
1839 * attached map.
1840 */
1841 static struct intel_miptree_map*
1842 intel_miptree_attach_map(struct intel_mipmap_tree *mt,
1843 unsigned int level,
1844 unsigned int slice,
1845 unsigned int x,
1846 unsigned int y,
1847 unsigned int w,
1848 unsigned int h,
1849 GLbitfield mode)
1850 {
1851 struct intel_miptree_map *map = calloc(1, sizeof(*map));
1852
1853 if (!map)
1854 return NULL;
1855
1856 assert(mt->level[level].slice[slice].map == NULL);
1857 mt->level[level].slice[slice].map = map;
1858
1859 map->mode = mode;
1860 map->x = x;
1861 map->y = y;
1862 map->w = w;
1863 map->h = h;
1864
1865 return map;
1866 }
1867
1868 /**
1869 * Release the map at (level, slice).
1870 */
1871 static void
1872 intel_miptree_release_map(struct intel_mipmap_tree *mt,
1873 unsigned int level,
1874 unsigned int slice)
1875 {
1876 struct intel_miptree_map **map;
1877
1878 map = &mt->level[level].slice[slice].map;
1879 free(*map);
1880 *map = NULL;
1881 }
1882
1883 static void
1884 intel_miptree_map_singlesample(struct intel_context *intel,
1885 struct intel_mipmap_tree *mt,
1886 unsigned int level,
1887 unsigned int slice,
1888 unsigned int x,
1889 unsigned int y,
1890 unsigned int w,
1891 unsigned int h,
1892 GLbitfield mode,
1893 void **out_ptr,
1894 int *out_stride)
1895 {
1896 struct intel_miptree_map *map;
1897
1898 assert(mt->num_samples <= 1);
1899
1900 map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
1901 if (!map) {
1902 *out_ptr = NULL;
1903 *out_stride = 0;
1904 return;
1905 }
1906
1907 intel_miptree_slice_resolve_depth(intel, mt, level, slice);
1908 if (map->mode & GL_MAP_WRITE_BIT) {
1909 intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
1910 }
1911
1912 if (mt->format == MESA_FORMAT_S8) {
1913 intel_miptree_map_s8(intel, mt, map, level, slice);
1914 } else if (mt->etc_format != MESA_FORMAT_NONE &&
1915 !(mode & BRW_MAP_DIRECT_BIT)) {
1916 intel_miptree_map_etc(intel, mt, map, level, slice);
1917 } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
1918 intel_miptree_map_depthstencil(intel, mt, map, level, slice);
1919 }
1920 /* See intel_miptree_blit() for details on the 32k pitch limit. */
1921 else if (intel->has_llc &&
1922 !(mode & GL_MAP_WRITE_BIT) &&
1923 !mt->compressed &&
1924 (mt->region->tiling == I915_TILING_X ||
1925 (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
1926 mt->region->pitch < 32768) {
1927 intel_miptree_map_blit(intel, mt, map, level, slice);
1928 } else if (mt->region->tiling != I915_TILING_NONE &&
1929 mt->region->bo->size >= intel->max_gtt_map_object_size) {
1930 assert(mt->region->pitch < 32768);
1931 intel_miptree_map_blit(intel, mt, map, level, slice);
1932 } else {
1933 intel_miptree_map_gtt(intel, mt, map, level, slice);
1934 }
1935
1936 *out_ptr = map->ptr;
1937 *out_stride = map->stride;
1938
1939 if (map->ptr == NULL)
1940 intel_miptree_release_map(mt, level, slice);
1941 }
1942
1943 static void
1944 intel_miptree_unmap_singlesample(struct intel_context *intel,
1945 struct intel_mipmap_tree *mt,
1946 unsigned int level,
1947 unsigned int slice)
1948 {
1949 struct intel_miptree_map *map = mt->level[level].slice[slice].map;
1950
1951 assert(mt->num_samples <= 1);
1952
1953 if (!map)
1954 return;
1955
1956 DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
1957 mt, _mesa_get_format_name(mt->format), level, slice);
1958
1959 if (mt->format == MESA_FORMAT_S8) {
1960 intel_miptree_unmap_s8(intel, mt, map, level, slice);
1961 } else if (mt->etc_format != MESA_FORMAT_NONE &&
1962 !(map->mode & BRW_MAP_DIRECT_BIT)) {
1963 intel_miptree_unmap_etc(intel, mt, map, level, slice);
1964 } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
1965 intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
1966 } else if (map->mt) {
1967 intel_miptree_unmap_blit(intel, mt, map, level, slice);
1968 } else {
1969 intel_miptree_unmap_gtt(intel, mt, map, level, slice);
1970 }
1971
1972 intel_miptree_release_map(mt, level, slice);
1973 }
1974
1975 static void
1976 intel_miptree_map_multisample(struct intel_context *intel,
1977 struct intel_mipmap_tree *mt,
1978 unsigned int level,
1979 unsigned int slice,
1980 unsigned int x,
1981 unsigned int y,
1982 unsigned int w,
1983 unsigned int h,
1984 GLbitfield mode,
1985 void **out_ptr,
1986 int *out_stride)
1987 {
1988 struct intel_miptree_map *map;
1989
1990 assert(mt->num_samples > 1);
1991
1992 /* Only flat, renderbuffer-like miptrees are supported. */
1993 if (mt->target != GL_TEXTURE_2D ||
1994 mt->first_level != 0 ||
1995 mt->last_level != 0) {
1996 _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
1997 "which (target, first_level, last_level != "
1998 "(GL_TEXTURE_2D, 0, 0)");
1999 goto fail;
2000 }
2001
2002 map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
2003 if (!map)
2004 goto fail;
2005
2006 if (!mt->singlesample_mt) {
2007 mt->singlesample_mt =
2008 intel_miptree_create_for_renderbuffer(intel,
2009 mt->format,
2010 mt->logical_width0,
2011 mt->logical_height0,
2012 0 /*num_samples*/);
2013 if (!mt->singlesample_mt)
2014 goto fail;
2015
2016 map->singlesample_mt_is_tmp = true;
2017 mt->need_downsample = true;
2018 }
2019
2020 intel_miptree_downsample(intel, mt);
2021 intel_miptree_map_singlesample(intel, mt->singlesample_mt,
2022 level, slice,
2023 x, y, w, h,
2024 mode,
2025 out_ptr, out_stride);
2026 return;
2027
2028 fail:
2029 intel_miptree_release_map(mt, level, slice);
2030 *out_ptr = NULL;
2031 *out_stride = 0;
2032 }
2033
2034 static void
2035 intel_miptree_unmap_multisample(struct intel_context *intel,
2036 struct intel_mipmap_tree *mt,
2037 unsigned int level,
2038 unsigned int slice)
2039 {
2040 struct intel_miptree_map *map = mt->level[level].slice[slice].map;
2041
2042 assert(mt->num_samples > 1);
2043
2044 if (!map)
2045 return;
2046
2047 intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
2048
2049 mt->need_downsample = false;
2050 if (map->mode & GL_MAP_WRITE_BIT)
2051 intel_miptree_upsample(intel, mt);
2052
2053 if (map->singlesample_mt_is_tmp)
2054 intel_miptree_release(&mt->singlesample_mt);
2055
2056 intel_miptree_release_map(mt, level, slice);
2057 }
2058
2059 void
2060 intel_miptree_map(struct intel_context *intel,
2061 struct intel_mipmap_tree *mt,
2062 unsigned int level,
2063 unsigned int slice,
2064 unsigned int x,
2065 unsigned int y,
2066 unsigned int w,
2067 unsigned int h,
2068 GLbitfield mode,
2069 void **out_ptr,
2070 int *out_stride)
2071 {
2072 if (mt->num_samples <= 1)
2073 intel_miptree_map_singlesample(intel, mt,
2074 level, slice,
2075 x, y, w, h,
2076 mode,
2077 out_ptr, out_stride);
2078 else
2079 intel_miptree_map_multisample(intel, mt,
2080 level, slice,
2081 x, y, w, h,
2082 mode,
2083 out_ptr, out_stride);
2084 }
2085
2086 void
2087 intel_miptree_unmap(struct intel_context *intel,
2088 struct intel_mipmap_tree *mt,
2089 unsigned int level,
2090 unsigned int slice)
2091 {
2092 if (mt->num_samples <= 1)
2093 intel_miptree_unmap_singlesample(intel, mt, level, slice);
2094 else
2095 intel_miptree_unmap_multisample(intel, mt, level, slice);
2096 }
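
/* Usage sketch for the public map/unmap pair above (illustrative only, hence
 * not compiled; the helper name is hypothetical): reading back a 16x16
 * window of level 0, slice 0.
 */
#if 0
static void
miptree_readback_example(struct intel_context *intel,
                         struct intel_mipmap_tree *mt)
{
   void *ptr;
   int stride;

   intel_miptree_map(intel, mt, 0, 0,      /* level 0, slice 0 */
                     0, 0, 16, 16,         /* x, y, w, h */
                     GL_MAP_READ_BIT, &ptr, &stride);
   if (ptr) {
      /* Row r of the window starts at (char *)ptr + r * stride and holds
       * 16 * mt->cpp bytes of pixel data.
       */
   }
   intel_miptree_unmap(intel, mt, 0, 0);
}
#endif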