i965/msaa: Allocate MCS buffer when CMS MSAA is in use.
[mesa.git] src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

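/* All six faces of a cube map share a single miptree, so treat any
 * per-face target as the cube map target itself.
 */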
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 0 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   (void) intel;
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *     [DevSNB+]: For multi-sample render targets, this field must be
          *     1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing. So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /* total_width == 0 || total_height == 0 indicates the null texture. */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);
   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      return INTEL_MSAA_LAYOUT_UMS;
   }
}

struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;

   if (num_samples > 0) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved: the UMS and CMS layouts store each sample in its
          * own array slice, so allocate one slice per sample.
          */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);

   return mt;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification. This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

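   /* One slice entry per depth layer (or cube face). Slice 0 inherits the
    * level's base offset; the rest are filled in later by
    * intel_miptree_set_image_offset().
    */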
   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, or the
 * cube face can be interpreted as a depth layer and passed via the \c layer
 * parameter.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

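   /* For compressed formats the blit operates on blocks, so convert the
    * height to block rows and pad the width out to a whole block.
    */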
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {

      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

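/* Allocate the auxiliary MCS (multisample control surface) buffer for a
 * miptree that uses the CMS (compressed multisample) layout. The MCS
 * records, for each pixel, which slice of the main surface holds each
 * sample's data.
 */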
bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */
   assert(num_samples == 4); /* TODO: support 8x MSAA */

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We pass INTEL_MSAA_LAYOUT_CMS as msaa_layout to force
    * intel_miptree_create() to use Y tiling. msaa_layout is otherwise
    * ignored for the MCS miptree.
    */
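   /* At 4x MSAA the MCS holds 8 bits of control data per pixel (2 bits per
    * sample), so an 8 bpp format (A8 here) yields a surface of the right
    * size.
    */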
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_A8,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);
   if (!mt->mcs_mt)
      return false;

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return true;
}

bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
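   /* mt->hiz_map is an embedded sentinel node; append one entry per
    * (level, layer) slice after it.
    */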
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks. intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* Either GL_MAP_READ_BIT or GL_MAP_WRITE_BIT (or both) is set. READ_BIT
    * implies no INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values
    * read in unless invalidate is set, since we'll be writing the whole
    * rectangle from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

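      /* intel_offset_S8() computes the W-tiled (and, if needed, swizzled)
       * byte address of each stencil texel, so this loop detiles the data
       * one byte at a time.
       */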
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Include the image offsets here, matching the map path above;
             * omitting them would write the data back to the wrong slice.
             */
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               image_x + x + map->x,
                                               image_y + y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer. Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations. We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
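   /* The depth miptree's format was rewritten from Z32_FLOAT_X24S8 to
    * Z32_FLOAT when its separate stencil miptree was created, so testing for
    * Z32_FLOAT here identifies the packed float depth/stencil case.
    */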
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* Either GL_MAP_READ_BIT or GL_MAP_WRITE_BIT (or both) is set. READ_BIT
    * implies no INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values
    * read in unless invalidate is set, since we'll be writing the whole
    * rectangle from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

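      /* Gather each texel from the separate Z and S surfaces and pack it the
       * way Mesa expects: a (z, stencil) pair of uint32 words for
       * Z32_FLOAT_X24S8, or an S8_Z24 word with stencil in the top byte.
       */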
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            /* Include the map offsets here, matching map_depthstencil()
             * above; omitting them would scatter depth data back to the
             * wrong texels for maps with a nonzero origin.
             */
            ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
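      /* Read-only maps of X-tiled surfaces are staged through a blit to a
       * linear temporary: on LLC systems a cached CPU map of that temporary
       * is much faster than detiling through uncached GTT reads.
       */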
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}