i965: Drop a layer of indirection in doing HiZ resolves.
[mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
27
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_regions.h"
32 #include "intel_resolve_map.h"
33 #include "intel_span.h"
34 #include "intel_tex_layout.h"
35 #include "intel_tex.h"
36 #include "intel_blit.h"
37
38 #include "main/enums.h"
39 #include "main/formats.h"
40 #include "main/image.h"
41 #include "main/teximage.h"
42
43 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
44
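/**
 * Map a cube face target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X) to the
 * GL_TEXTURE_CUBE_MAP target of the cube map it belongs to, so that all six
 * faces share one miptree.  Any other target is returned unchanged.
 */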
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region().  If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0,
                              bool for_region,
                              GLuint num_samples)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


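/**
 * Allocate a miptree along with the region that backs it, choosing a tiling
 * mode suited to the surface: Y tiling for depth and multisampled surfaces,
 * X tiling for other surfaces wide enough to benefit from it, and no tiling
 * for compressed and narrow surfaces.  W-tiled stencil is special-cased
 * below, since the GTT can't fence W tiling.
 */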
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     GLuint num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (num_samples > 0) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *     [DevSNB+]: For multi-sample render targets, this field must be
          *     1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled.  However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false, num_samples);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}


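/**
 * Wrap an existing region (such as a DRI2 buffer shared with the X server)
 * in a single-level miptree.  No separate stencil miptree is created; see
 * the \c for_region parameter of intel_miptree_create_internal().
 */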
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1,
                                      true, 0 /* num_samples */);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

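/**
 * Allocate the miptree backing a window-system or user renderbuffer.
 *
 * For multisampled renderbuffers, the allocation is scaled up so that each
 * logical pixel owns a block of samples: 2x2 samples per pixel at 4x MSAA
 * and 4x2 at 8x MSAA, matching the interleaved sample layout described in
 * the PRM.  Other nonzero sample counts are rounded to a supported count
 * (4 or 8).
 */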
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;

   /* Adjust width/height for MSAA */
   if (num_samples > 4) {
      num_samples = 8;
      width *= 4;
      height *= 2;
   } else if (num_samples > 0) {
      num_samples = 4;
      width *= 2;
      height *= 2;
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, 1, true, num_samples);

   return mt;
}

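/**
 * Make \c *dst point at \c src, adjusting reference counts: \c src (if
 * non-NULL) gains a reference, and whatever \c *dst previously pointed at
 * is released.
 */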
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

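/**
 * Return the image's dimensions in miptree terms.  For 1D array textures,
 * GL stores the layer count in \c Height, but the miptree treats the layers
 * as depth slices of 1-pixel-tall images.
 */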
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


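/**
 * Record the position and size of a mipmap level within the miptree, and
 * allocate the per-slice offset table.  Slice 0 starts at the level's
 * origin; the remaining slices get their offsets filled in later by
 * intel_miptree_set_image_offset().
 */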
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, or the
 * cube face can be interpreted as a depth layer and passed as the \c layer
 * parameter instead.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map.  In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

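/**
 * Copy one 2D slice of a mipmap level from \c src_mt to \c dst_mt, along
 * with the matching slice of any separate stencil miptree.  The copy uses
 * the blitter when possible, falling back to a mapped CPU copy.  For
 * compressed formats, the height is converted to a count of block rows and
 * the width is aligned to the block size.
 */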
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

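/**
 * Allocate the HiZ (hierarchical depth) buffer for a depth miptree, and
 * mark every (level, layer) slice as needing a HiZ resolve, since the new
 * HiZ buffer's contents are undefined relative to the existing depth data.
 */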
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}

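/**
 * HiZ resolve tracking: after the depth buffer is written without HiZ (CPU
 * access, blits), the HiZ buffer is stale until a HiZ resolve updates it;
 * after rendering with HiZ, the depth buffer itself is stale until a depth
 * resolve rewrites it.  The setters below record the operation needed for
 * each (level, layer) in mt->hiz_map; the resolve functions execute pending
 * entries with intel_hiz_exec() and remove them from the map.
 */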
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           GEN6_HIZ_OP_DEPTH_RESOLVE);
}

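/**
 * Map a miptree slice by mapping the whole region through the GTT, which
 * presents a linear view of tiled buffers via the hardware fence.  The
 * returned pointer is offset to the requested rectangle within the slice.
 */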
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

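/**
 * Map a miptree slice by blitting it into a temporary linear BO and mapping
 * that instead.  On LLC systems this avoids reading through an uncached GTT
 * mapping, so intel_miptree_map() selects it for read-only maps; since
 * nothing can have been written, the unmap just drops the temporary.
 */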
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

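/**
 * Map a slice of a stencil miptree.  Stencil buffers are W tiled, which the
 * GTT can't detile, so a temporary untiled copy is built (or, for
 * invalidating writes, just allocated), with intel_offset_S8() doing the W
 * detiling in software.  Writes are copied back on unmap.
 */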
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      /* Write back to the same texels that intel_miptree_map_s8() read from:
       * the slice's image offset within the miptree must be included.
       */
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}


/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 intel->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 intel->has_swizzling);
            /* As on map, the map's origin within the slice has to be added
             * to the Z offset too.
             */
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

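/**
 * Map a rectangle of a miptree slice for CPU access, choosing the
 * appropriate strategy: software W detiling for stencil, a malloc'd
 * temporary for packed depth/stencil backed by separate miptrees, a blit to
 * a linear temporary for read-only maps of X-tiled surfaces on LLC systems,
 * and a direct GTT mapping otherwise.  Also performs any pending depth
 * resolve and flags the slice for a HiZ resolve on write.
 */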
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}