i965/msaa: Treat GL_SAMPLES=1 as equivalent to GL_SAMPLES=0.
src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_region Indicates that the caller is
 * intel_miptree_create_for_region(). If true, then do not create
 * \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format;
   bool wraps_etc1 = false;

   if (format == MESA_FORMAT_ETC1_RGB8) {
      format = MESA_FORMAT_RGBX8888_REV;
      wraps_etc1 = true;
   }

   base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->wraps_etc1 = wraps_etc1;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);
   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
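
/* A minimal illustrative sketch (hypothetical helper, not called anywhere in
 * this driver): the effective allocation size of a MESA_FORMAT_S8 stencil
 * buffer after the W-tile round-up performed in intel_miptree_create() above.
 * A W tile covers 64x64 bytes, i.e. 64x64 pixels of 1-byte stencil, so e.g.
 * a 130x66 stencil buffer is allocated as 192x128.
 */
static void
example_s8_alloc_size(GLuint width, GLuint height,
                      GLuint *alloc_width, GLuint *alloc_height)
{
   *alloc_width = ALIGN(width, 64);   /* round width up to a W-tile multiple */
   *alloc_height = ALIGN(height, 64); /* round height up to a W-tile multiple */
}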


struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);

   return mt;
}
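
/* An illustrative sketch derived solely from the IMS layout comment above
 * (hypothetical helper, not used by the driver): for a 4x IMS surface, map a
 * pixel position and sample index to the coordinates that actually hold the
 * sample in the double-width, double-height memory layout.
 */
static void
example_ims_4x_sample_position(uint32_t x, uint32_t y, uint32_t sample,
                               uint32_t *mem_x, uint32_t *mem_y)
{
   /* Each aligned 2x2 pixel block expands to a 4x4 block in memory.  Within
    * it, bit 0 of the sample index selects the right 2x2 sub-block and bit 1
    * selects the bottom 2x2 sub-block, while bit 0 of x and y is preserved.
    */
   *mem_x = (x & ~1u) * 2 + (sample & 1) * 2 + (x & 1);
   *mem_y = (y & ~1u) * 2 + ((sample >> 1) & 1) * 2 + (y & 1);
}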

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}
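
/* The dimension test above compares the image against the sizes recorded at
 * layout time, which follow the usual GL minification rule sketched here
 * (hypothetical helper, not used by the driver): level i of a base dimension
 * d has dimension max(1, d >> i).
 */
static int
example_minify(int d, int level)
{
   int minified = d >> level;
   return minified > 0 ? minified : 1;
}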


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, or the
 * cube face can be interpreted as a depth layer and the \c layer parameter
 * used.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We set msaa_layout to INTEL_MSAA_LAYOUT_CMS to force
    * intel_miptree_create() to use Y tiling.  msaa_layout is otherwise
    * ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return mt->mcs_mt;
}
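
/* An illustrative restatement of the MCS sizing rationale above (hypothetical
 * helper, not used by the driver): the bits of MCS data needed per pixel for
 * each supported sample count, from which the R8 and R_UINT32 format choices
 * follow.
 */
static int
example_mcs_bits_per_pixel(int num_samples)
{
   switch (num_samples) {
   case 4:
      return 8;  /* 2 bits per sample * 4 samples -> MESA_FORMAT_R8 */
   case 8:
      return 32; /* 3 bits per sample * 8 samples + 8 pad -> MESA_FORMAT_R_UINT32 */
   default:
      return 0;  /* MCS is not used for other sample counts */
   }
}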

bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Include the image offsets so the write-back hits the same
             * W-tiled locations that intel_miptree_map_s8() read from.
             */
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

static void
intel_miptree_map_etc1(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   /* For justification of these invariants,
    * see intel_mipmap_tree:wraps_etc1.
    */
   assert(mt->wraps_etc1);
   assert(mt->format == MESA_FORMAT_RGBX8888_REV);

   /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
    *
    *     INVALID_OPERATION is generated by CompressedTexSubImage2D,
    *     TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
    *     bound to <target> has internal format ETC1_RGB8_OES.
    *
    * This implies that intel_miptree_map_etc1() can only be called from
    * glCompressedTexImage2D, and hence the assertions below hold.
    */
   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
   assert(map->x == 0);
   assert(map->y == 0);

   /* Each ETC1 block contains 4x4 pixels in 8 bytes. */
   map->stride = 2 * map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
}
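
/* An illustrative check of the stride chosen above (hypothetical helper, not
 * used by the driver): with 4x4-pixel ETC1 blocks of 8 bytes each, one row of
 * blocks covering w pixels takes (w / 4) * 8 = 2 * w bytes, assuming w is a
 * multiple of the block width.
 */
static uint32_t
example_etc1_block_row_bytes(uint32_t w)
{
   return (w / 4) * 8;
}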

static void
intel_miptree_unmap_etc1(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

   uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
                 + image_y * mt->region->pitch * mt->region->cpp
                 + image_x * mt->region->cpp;

   _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
                              map->ptr, map->stride,
                              map->w, map->h);

   intel_region_unmap(intel, mt->region);
   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            /* Include the map offsets so the write-back mirrors the offsets
             * used when reading the rectangle in on map.
             */
            ptrdiff_t z_offset = ((y + map->y + z_image_y) * z_mt->region->pitch +
                                  (x + map->x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
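
/* An illustrative sketch of the packed S8_Z24 encoding produced and consumed
 * by the map/unmap pair above (hypothetical helpers, not used by the driver):
 * stencil lives in the top byte, 24-bit depth in the low bits.
 */
static uint32_t
example_pack_s8_z24(uint8_t s, uint32_t z24)
{
   return ((uint32_t) s << 24) | (z24 & 0x00ffffff);
}

static void
example_unpack_s8_z24(uint32_t packed, uint8_t *s, uint32_t *z24)
{
   *s = packed >> 24;
   *z24 = packed & 0x00ffffff;
}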

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_map_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc1) {
      intel_miptree_unmap_etc1(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}