i915: Remove the I915 macro from the formerly shared code.
[mesa.git] / src / mesa / drivers / dri / i915 / intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

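/* Map a cube map face target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X) to the
 * GL_TEXTURE_CUBE_MAP target that owns the miptree; all other targets pass
 * through unchanged.
 */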
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* For compressed textures, cpp is the number of bytes per
    * (1, blockheight)-sized block, which is why divides by the block
    * height show up throughout the layout code.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
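
   /* For example, ETC1 uses 4x4 blocks of 8 bytes, so cpp works out to
    * 8 / 4 = 2 bytes per one-texel-wide, block-height-tall column.
    */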

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   if (!for_bo &&
       _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
       intel->must_use_separate_stencil) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->logical_width0,
                                            mt->logical_height0,
                                            mt->logical_depth0,
                                            true,
                                            INTEL_MIPTREE_TILING_ANY);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            gl_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       */
      return I915_TILING_NONE;
   }

   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   GLenum base_format = _mesa_get_format_base_format(format);
   if (intel->gen >= 4 &&
       (base_format == GL_DEPTH_COMPONENT ||
        base_format == GL_DEPTH_STENCIL_EXT))
      return I915_TILING_Y;

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

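   /* The BLT engine's pitch is a signed 16-bit field in bytes, so surfaces
    * that would need a pitch of 32k or more can't be blitted and are better
    * left untiled.
    */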
   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
   if (intel->gen < 6)
      return I915_TILING_X;

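   /* Both flags together act as a sentinel meaning "prefer Y, but fall back
    * to X if the BO turns out too large to map": see the y_or_x handling in
    * intel_miptree_create().
    */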
   return I915_TILING_Y | I915_TILING_X;
}

struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   if (!intel->is_baytrail) {
      switch (format) {
      case MESA_FORMAT_ETC1_RGB8:
      case MESA_FORMAT_ETC2_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_SRGB8:
      case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
      case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_SARGB8;
         break;
      case MESA_FORMAT_ETC2_RGBA8_EAC:
      case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_RGBA8888_REV;
         break;
      case MESA_FORMAT_ETC2_R11_EAC:
         format = MESA_FORMAT_R16;
         break;
      case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
         format = MESA_FORMAT_SIGNED_R16;
         break;
      case MESA_FORMAT_ETC2_RG11_EAC:
         format = MESA_FORMAT_GR1616;
         break;
      case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
         format = MESA_FORMAT_SIGNED_GR1616;
         break;
      default:
         /* Not an ETC1/ETC2 format. */
         break;
      }
   }

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* Align to size of W tile, 64x64. */
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            gl_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch. If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1,
                                    true);
   if (!mt) {
      free(region);
      return NULL;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}


/**
 * Wraps the given DRI2 buffer's region with a singlesample miptree.
 *
 * Only singlesample color buffers are shared through DRI2 here, so no
 * multisample wrapping is needed.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* The glTexImage* functions choose the texture object based on the target
    * passed in, and objects can't change targets over their lifetimes, so
    * this should be true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification. This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* A nonzero level here is always bogus. */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* All normal textures, renderbuffers, etc. */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary. For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}

static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
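   /* When both maps are tightly packed (stride == row_size), the rectangle
    * is contiguous in memory and the per-row loop collapses into a single
    * memcpy.
    */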
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);

   /* Don't forget to copy the stencil data over, too. We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(intel, dst_mt->stencil_mt, src_mt->stencil_mt,
                                  level, slice, width, height);
   }
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(intel,
                                  dst_mt, src_mt,
                                  level, slice,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
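/* A quick worked example of the decode below, staying inside tile (0, 0):
 * for (x, y) = (5, 3), byte_x = 5 and byte_y = 3, and the offset comes to
 * 16*1 + 8*1 + 2*1 + 1*1 = 27 bytes into the tile (the 512, 64, 32, and 4
 * terms are all zero).
 */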
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride;

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}

void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks. intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt);

   if (base == NULL) {
      map->ptr = NULL;
   } else {
      base += mt->offset;

      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(intel, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
    * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(intel, mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(intel, mt);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(intel, mt);
   }

   free(map->buffer);
}

static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

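   /* Stage the client's compressed ETC data in a malloced buffer; it is
    * decompressed into the RGBA/RGBX miptree when the map is released, in
    * intel_miptree_unmap_etc() below.
    */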
   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(intel, mt);
   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer. Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations. We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;
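
   /* A Z32_FLOAT miptree with a separate stencil miptree is presented to the
    * caller as Z32_FLOAT_X24S8: two dwords per texel (float depth, then the
    * stencil byte in the low bits of the second dword). S8_Z24 packs into a
    * single dword with stencil in the top byte.
    */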
   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
    * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->region->pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(intel, s_mt);
      uint32_t *z_map = intel_miptree_map_raw(intel, z_mt);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->region->pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(intel, s_mt);
      intel_miptree_unmap_raw(intel, z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

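   /* Dispatch in order of specificity: W-tiled stencil, ETC formats stored
    * decompressed, packed depth/stencil split into separate miptrees, a blit
    * to a linear temporary (for cheap read-only maps on LLC systems, or for
    * tiled BOs too large to map through the GTT), and finally a direct GTT
    * map.
    */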
   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            (mt->region->tiling == I915_TILING_X ||
             (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}
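
/* A sketch of typical intel_miptree_map()/intel_miptree_unmap() usage; the
 * level/slice/x/y/w/h values here are illustrative only:
 *
 *    void *ptr;
 *    int stride;
 *
 *    intel_miptree_map(intel, mt, 0, 0, 0, 0, 16, 16,
 *                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
 *                      &ptr, &stride);
 *    if (ptr) {
 *       for (int y = 0; y < 16; y++)
 *          memset((char *)ptr + y * stride, 0, 16 * mt->cpp);
 *       intel_miptree_unmap(intel, mt, 0, 0);
 *    }
 *
 * On failure ptr comes back NULL and no map remains attached, so only a
 * successful map needs a matching unmap.
 */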