intel: Move compute_msaa_layout earlier in file.
[mesa.git] src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#ifndef I915
#include "brw_blorp.h"
#endif

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

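/* Collapse the per-face cube map targets to the cube map target itself, so
 * that all six faces of a cube texture share one miptree.
 */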
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}


/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}


/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


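/**
 * Create a miptree and allocate its backing region.
 *
 * ETC1/ETC2 formats, which the hardware cannot sample directly, are stored
 * decompressed in an equivalent uncompressed format (see the switch below);
 * the original ETC format is remembered in \c etc_format.
 *
 * Illustrative call, with hypothetical parameter values (first/last level 0,
 * a 64x64x1 non-MSAA ARGB8888 texture):
 *
 *    struct intel_mipmap_tree *mt =
 *       intel_miptree_create(intel, GL_TEXTURE_2D, MESA_FORMAT_ARGB8888,
 *                            0, 0, 64, 64, 1, true,
 *                            0, INTEL_MSAA_LAYOUT_NONE);
 */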
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
   case MESA_FORMAT_ETC2_RGB8:
      format = MESA_FORMAT_RGBX8888_REV;
      break;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_SARGB8;
      break;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      format = MESA_FORMAT_RGBA8888_REV;
      break;
   case MESA_FORMAT_ETC2_R11_EAC:
      format = MESA_FORMAT_R16;
      break;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      format = MESA_FORMAT_SIGNED_R16;
      break;
   case MESA_FORMAT_ETC2_RG11_EAC:
      format = MESA_FORMAT_RG1616;
      break;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      format = MESA_FORMAT_SIGNED_GR1616;
      break;
   default:
      /* Non-ETC1/ETC2 format */
      break;
   }

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
   base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples, msaa_layout);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled.  However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      total_width = ALIGN(total_width, 64);
      total_height = ALIGN(total_height, 64);
   }

   mt->wraps_etc = (etc_format != MESA_FORMAT_NONE);
   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);
   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}


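/**
 * Wrap an existing region in a single-level miptree.  The region's reference
 * count is incremented; no separate stencil miptree is created, because
 * for_region is passed as true to intel_miptree_create_internal().
 */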
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}


/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * For a multisample DRI2 buffer, this wraps the given region with
 * a singlesample miptree, then creates a multisample miptree into which the
 * singlesample miptree is embedded as a child.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *singlesample_mt = NULL;
   struct intel_mipmap_tree *multisample_mt = NULL;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(base_format == GL_RGB || base_format == GL_RGBA);

   singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
                                                     format, region);
   if (!singlesample_mt)
      return NULL;

   if (num_samples == 0)
      return singlesample_mt;

   multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                          format,
                                                          region->width,
                                                          region->height,
                                                          num_samples);
   if (!multisample_mt) {
      intel_miptree_release(&singlesample_mt);
      return NULL;
   }

   multisample_mt->singlesample_mt = singlesample_mt;
   multisample_mt->need_downsample = false;

   if (intel->is_front_buffer_rendering &&
       (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
        dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
      intel_miptree_upsample(intel, multisample_mt);
   }

   return multisample_mt;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
   const uint32_t singlesample_width = width;
   const uint32_t singlesample_height = height;
   bool ok;

   if (num_samples > 1) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *   "Any of the other messages (sample*, LOD, load4) used with a
          *    (4x) multisampled surface will in-effect sample a surface with
          *    double the height and width as that indicated in the surface
          *    state.  Each pixel position on the original-sized surface is
          *    replaced with a 2x2 of samples with the following arrangement:
          *
          *       sample 0 sample 2
          *       sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *       (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *       (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *       (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *       (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *       (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *       (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *       (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *       (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 1, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, depth, true, num_samples,
                             msaa_layout);
   if (!mt)
      goto fail;

   if (intel->vtbl.is_hiz_depth_format(intel, format)) {
      ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
      if (!ok)
         goto fail;
   }

   mt->singlesample_width0 = singlesample_width;
   mt->singlesample_height0 = singlesample_height;

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

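/**
 * Make *dst point at src, dropping any existing reference held by *dst and
 * bumping src's reference count.
 */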
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


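/**
 * Drop a reference to the miptree and null out the pointer.  When the
 * reference count reaches zero, the region, the auxiliary miptrees (separate
 * stencil, HiZ, MCS, singlesample), the resolve map, and the per-level slice
 * arrays are all released.
 */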
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_miptree_release(&(*mt)->singlesample_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

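/**
 * Compute the (width, height, depth) to use for allocating an image's level
 * in a miptree.  GL_TEXTURE_1D_ARRAY is special-cased because GL stores its
 * layer count in image->Height, while the miptree keeps layers in depth.
 */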
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_X8_Z24 && mt->stencil_mt)
      mt_format = MESA_FORMAT_S8_Z24;
   if (mt->format == MESA_FORMAT_Z32_FLOAT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_X24S8;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


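/**
 * Record the size and position of a mipmap level within the miptree, and
 * allocate its slice array (slice 0 inherits the level's x/y offset).
 */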
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

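/**
 * Copy one slice of one mipmap level from src_mt to dst_mt.  The copy is
 * attempted with the blitter first; if that fails, the regions are mapped
 * and copied on the CPU.  Separate stencil data, if present, is copied
 * recursively.
 */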
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

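/**
 * Allocate the MCS (multisample control surface) auxiliary buffer for a CMS
 * color miptree.  The MCS is cleared to all 1's on allocation, as required
 * before any rendering (see the PRM quote in the body).  Returns true on
 * success.
 */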
bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */

   /* Choose the correct format for the MCS buffer.  All that really matters
    * is that we allocate the right buffer size, since we'll always be
    * accessing this miptree using MCS-specific hardware mechanisms, which
    * infer the correct format based on num_samples.
    */
   gl_format format;
   switch (num_samples) {
   case 4:
      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
       * each sample).
       */
      format = MESA_FORMAT_R8;
      break;
   case 8:
      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
       * for each sample, plus 8 padding bits).
       */
      format = MESA_FORMAT_R_UINT32;
      break;
   default:
      assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
      return false;
   }

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *   "The MCS surface must be stored as Tile Y."
    *
    * We set msaa_layout to INTEL_MSAA_LAYOUT_CMS to force
    * intel_miptree_create() to use Y tiling.  msaa_layout is otherwise
    * ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);
   if (!mt->mcs_mt)
      return false;

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *   When MCS buffer is enabled and bound to MSRT, it is required that it
    *   is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return true;
}

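/**
 * Allocate the HiZ auxiliary miptree for a depth miptree and mark every
 * (level, layer) slice as needing a HiZ resolve.  Returns true on success.
 */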
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     mt->format,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

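/**
 * Perform the given resolve operation on one slice, if the resolve map says
 * it is pending.  Returns true if a resolve was executed.
 */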
static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static void
intel_miptree_updownsample(struct intel_context *intel,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst,
                           unsigned width,
                           unsigned height)
{
#ifndef I915
   int src_x0 = 0;
   int src_y0 = 0;
   int dst_x0 = 0;
   int dst_y0 = 0;

   intel_miptree_slice_resolve_depth(intel, src, 0, 0);
   intel_miptree_slice_resolve_depth(intel, dst, 0, 0);

   brw_blorp_blit_miptrees(intel,
                           src, 0 /* level */, 0 /* layer */,
                           dst, 0 /* level */, 0 /* layer */,
                           src_x0, src_y0,
                           dst_x0, dst_y0,
                           width, height,
                           false, false /* mirror x, y */);

   if (src->stencil_mt) {
      brw_blorp_blit_miptrees(intel,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              src_x0, src_y0,
                              dst_x0, dst_y0,
                              width, height,
                              false, false /* mirror x, y */);
   }
#endif /* !I915 */
}

static void
assert_is_flat(struct intel_mipmap_tree *mt)
{
   assert(mt->target == GL_TEXTURE_2D);
   assert(mt->first_level == 0);
   assert(mt->last_level == 0);
}

/**
 * \brief Downsample from mt to mt->singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_miptree_downsample(struct intel_context *intel,
                         struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);

   if (!mt->need_downsample)
      return;
   intel_miptree_updownsample(intel,
                              mt, mt->singlesample_mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   mt->need_downsample = false;

   /* Strictly speaking, after a downsample on a depth miptree, a hiz
    * resolve is needed on the singlesample miptree.  However, since the
    * singlesample miptree is never rendered to, the hiz resolve will never
    * occur.  Therefore we do not mark the needed hiz resolve after
    * downsampling.
    */
}

/**
 * \brief Upsample from mt->singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_miptree_upsample(struct intel_context *intel,
                       struct intel_mipmap_tree *mt)
{
   /* Only flat, renderbuffer-like miptrees are supported. */
   assert_is_flat(mt);
   assert(!mt->need_downsample);

   intel_miptree_updownsample(intel,
                              mt->singlesample_mt, mt,
                              mt->singlesample_mt->width0,
                              mt->singlesample_mt->height0);
   intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
}

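/**
 * Map a miptree slice directly through the GTT.  On success, map->ptr points
 * at the requested (x, y) within the slice and map->stride is the region
 * pitch in bytes.
 */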
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

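/**
 * Map a miptree slice by blitting it into a temporary, linear BO and mapping
 * that instead.  The dispatcher below uses this path for read-only maps of
 * X-tiled, uncompressed surfaces on LLC systems.
 */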
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

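/**
 * Map a W-tiled stencil miptree by detiling it into a malloc'ed temporary.
 * intel_offset_S8() computes the swizzled offset of each byte; on unmap the
 * temporary is tiled back into place if the map was writable.
 */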
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Mirror the offset computation in intel_miptree_map_s8():
             * the slice's image offset within the region must be included.
             */
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

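/**
 * Map an ETC-wrapped miptree.  The caller writes ETC-compressed texels into
 * a malloc'ed shadow buffer; on unmap, the data is decompressed into the
 * miptree's uncompressed backing format (see intel_miptree_create()).  Only
 * write-with-invalidate maps are supported, as asserted below.
 */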
static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   /* For justification see intel_mipmap_tree:wraps_etc. */
   assert(mt->wraps_etc);

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_region_map(intel, mt->region, map->mode)
                + image_y * mt->region->pitch * mt->region->cpp
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch * mt->region->cpp,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch * mt->region->cpp,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_region_unmap(intel, mt->region);
   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            /* Match the offset computation in intel_miptree_map_depthstencil():
             * the map's x/y offset must be included here too.
             */
            ptrdiff_t z_offset = ((y + map->y + z_image_y) *
                                  z_mt->region->pitch +
                                  (x + map->x + z_image_x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

/**
 * Create and attach a map to the miptree at (level, slice).  Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

static void
intel_miptree_map_singlesample(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               unsigned int level,
                               unsigned int slice,
                               unsigned int x,
                               unsigned int y,
                               unsigned int w,
                               unsigned int h,
                               GLbitfield mode,
                               void **out_ptr,
                               int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples <= 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

static void
intel_miptree_unmap_singlesample(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples <= 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->wraps_etc) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}

static void
intel_miptree_map_multisample(struct intel_context *intel,
                              struct intel_mipmap_tree *mt,
                              unsigned int level,
                              unsigned int slice,
                              unsigned int x,
                              unsigned int y,
                              unsigned int w,
                              unsigned int h,
                              GLbitfield mode,
                              void **out_ptr,
                              int *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->num_samples > 1);

   /* Only flat, renderbuffer-like miptrees are supported. */
   if (mt->target != GL_TEXTURE_2D ||
       mt->first_level != 0 ||
       mt->last_level != 0) {
      _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
                    "which (target, first_level, last_level) != "
                    "(GL_TEXTURE_2D, 0, 0)");
      goto fail;
   }

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map)
      goto fail;

   if (!mt->singlesample_mt) {
      mt->singlesample_mt =
         intel_miptree_create_for_renderbuffer(intel,
                                               mt->format,
                                               mt->singlesample_width0,
                                               mt->singlesample_height0,
                                               0 /* num_samples */);
      if (!mt->singlesample_mt)
         goto fail;

      map->singlesample_mt_is_tmp = true;
      mt->need_downsample = true;
   }

   intel_miptree_downsample(intel, mt);
   intel_miptree_map_singlesample(intel, mt->singlesample_mt,
                                  level, slice,
                                  x, y, w, h,
                                  mode,
                                  out_ptr, out_stride);
   return;

fail:
   intel_miptree_release_map(mt, level, slice);
   *out_ptr = NULL;
   *out_stride = 0;
}

static void
intel_miptree_unmap_multisample(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->num_samples > 1);

   if (!map)
      return;

   intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);

   mt->need_downsample = false;
   if (map->mode & GL_MAP_WRITE_BIT)
      intel_miptree_upsample(intel, mt);

   if (map->singlesample_mt_is_tmp)
      intel_miptree_release(&mt->singlesample_mt);

   intel_miptree_release_map(mt, level, slice);
}

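/**
 * Map a rectangle of one (level, slice) of the miptree for CPU access,
 * dispatching to the singlesample or multisample path as appropriate.  On
 * return, *out_ptr points at the upper-left of the mapped rectangle and
 * *out_stride gives the row stride in bytes; a failed map yields
 * *out_ptr == NULL.
 *
 * Illustrative read-back of a 16x16 block at the origin of level 0, slice 0
 * (the variable names here are hypothetical):
 *
 *    void *ptr;
 *    int stride;
 *    intel_miptree_map(intel, mt, 0, 0, 0, 0, 16, 16,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    if (ptr) {
 *       ... read 16 rows, stepping by stride bytes per row ...
 *       intel_miptree_unmap(intel, mt, 0, 0);
 *    }
 */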
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   if (mt->num_samples <= 1)
      intel_miptree_map_singlesample(intel, mt,
                                     level, slice,
                                     x, y, w, h,
                                     mode,
                                     out_ptr, out_stride);
   else
      intel_miptree_map_multisample(intel, mt,
                                    level, slice,
                                    x, y, w, h,
                                    mode,
                                    out_ptr, out_stride);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   if (mt->num_samples <= 1)
      intel_miptree_unmap_singlesample(intel, mt, level, slice);
   else
      intel_miptree_unmap_multisample(intel, mt, level, slice);
}