intel: Refactor creation of hiz and mcs miptrees
[mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region().  If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format;
   bool wraps_etc1 = false;

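   /* The hardware cannot sample ETC1 directly, so the texture is stored
    * uncompressed as RGBX and the ETC1 data is decompressed at upload time;
    * see intel_miptree_unmap_etc1().
    */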
   if (format == MESA_FORMAT_ETC1_RGB8) {
      format = MESA_FORMAT_RGBX8888_REV;
      wraps_etc1 = true;
   }

   base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1.  MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled.  However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
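      /* For example, a 67x65 stencil miptree is allocated as 128x128. */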
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /* pitch == 0 || height == 0 indicates the null texture. */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->wraps_etc1 = wraps_etc1;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);
   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}

struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   bool ok;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state.  Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
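         /* For example, a 5x3 4x IMS renderbuffer is allocated as
          * ALIGN(5, 2) * 2 = 12 pixels wide by ALIGN(3, 2) * 2 = 8 high.
          */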
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved: each sample is stored in its own array slice. */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);
   if (!mt)
      goto fail;

   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, of course,
 * or the cube face can be interpreted as a depth layer and the \c layer
 * parameter used.
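 *
 * For example, face 3 of a cube map level can be addressed either with
 * face = 3, layer = 0 or with face = 0, layer = 3; both select slice 3.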
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map.  In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {

      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We pass INTEL_MSAA_LAYOUT_CMS to force intel_miptree_create() to use
    * Y tiling.  The msaa_layout is otherwise ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);
   if (!mt->mcs_mt)
      return false;

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return true;
}

bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

static void
intel_miptree_map_etc1(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   /* For justification of these invariants,
    * see intel_mipmap_tree::wraps_etc1.
    */
   assert(mt->wraps_etc1);
   assert(mt->format == MESA_FORMAT_RGBX8888_REV);

   /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
    *
    *     INVALID_OPERATION is generated by CompressedTexSubImage2D,
    *     TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
    *     bound to <target> has internal format ETC1_RGB8_OES.
    *
    * This implies that intel_miptree_map_etc1() can only be called from
    * glCompressedTexImage2D, and hence the assertions below hold.
    */
   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
   assert(map->x == 0);
   assert(map->y == 0);

   /* Each ETC1 block contains 4x4 pixels in 8 bytes. */
   map->stride = 2 * map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
}

static void
intel_miptree_unmap_etc1(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

   uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
                 + image_y * mt->region->pitch * mt->region->cpp
                 + image_x * mt->region->cpp;

   _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
                              map->ptr, map->stride,
                              map->w, map->h);

   intel_region_unmap(intel, mt->region);
   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

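/* Typical usage of the map/unmap interface below, e.g. when uploading a
 * single image (an illustrative sketch, not a verbatim caller; level, slice,
 * width, height, src, src_stride, and cpp are assumed to be provided by the
 * caller):
 *
 *    void *dst;
 *    int dst_stride;
 *    intel_miptree_map(intel, mt, level, slice, 0, 0, width, height,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &dst, &dst_stride);
 *    if (dst) {
 *       for (int y = 0; y < height; y++)
 *          memcpy((char *) dst + y * dst_stride, src + y * src_stride,
 *                 width * cpp);
 *       intel_miptree_unmap(intel, mt, level, slice);
 *    }
 */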
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_map_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_unmap_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}