i965: Fix infinite loop regression in intel_miptree_all_slices_resolve.
src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       */
      assert(intel->has_separate_stencil);
      mt->cpp = 2;
   }

   if (_mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth
       * buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }
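   /* Once split, a packed depth/stencil texture is really two miptrees:
    * this Z miptree plus mt->stencil_mt holding the S8 data.  Mesa core
    * still sees a single packed surface; intel_miptree_map_depthstencil()
    * below reassembles that packed view in a temporary buffer on map.
    */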

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

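   /* Pick a tiling mode.  Depth and depth/stencil surfaces get Y tiling on
    * gen4+ (depth buffers are used Y-tiled on those chips); other
    * sufficiently wide, uncompressed surfaces default to X tiling.
    */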
   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
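      /* Worked example of the hack: a 70x65 stencil buffer becomes
       * width0 = ALIGN(70, 64) = 128 and
       * height0 = ALIGN((65 + 1) / 2, 64) = 64 below.  With cpp forced to 2
       * in intel_miptree_create_internal(), each region row then covers two
       * interleaved stencil rows, giving the doubled pitch the spec demands.
       */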
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN((height0 + 1) / 2, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture.
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}


struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, 1, true);

   return mt;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, of course,
 * or the cube face can be interpreted as a depth layer and the \c layer
 * parameter used.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map.
       * In that case, the caller chose to interpret each cube face as a
       * layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
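   /* mt->hiz_map is an embedded sentinel head node; the real per-(level,
    * layer) entries hang off head->next, which is what the resolve walkers
    * further down iterate over.
    */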
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = INTEL_NEED_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_DEPTH_RESOLVE);
}

typedef void (*resolve_func_t)(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer);

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum intel_need_resolve need,
                            resolve_func_t func)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   func(intel, mt, level, layer);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_HIZ_RESOLVE,
                                      intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_DEPTH_RESOLVE,
                                      intel->vtbl.resolve_depth_slice);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum intel_need_resolve need,
                                 resolve_func_t func)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

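   /* Cache i->next before resolving: intel_resolve_map_remove() frees the
    * node, so dereferencing i after the removal is a use-after-free.  The
    * infinite loop this commit fixes came from exactly that pattern.
    */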
   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;
      func(intel, mt, i->level, i->layer);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_HIZ_RESOLVE,
                                           intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_DEPTH_RESOLVE,
                                           intel->vtbl.resolve_depth_slice);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);
   /* Note that in the case of cube maps, the caller must have passed the
    * slice number referencing the face.
    */
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

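   /* region->pitch is counted in pixels here (note how the DBG output in
    * intel_miptree_copy_slice() multiplies pitch by cpp to print bytes), so
    * convert it to a byte stride for the caller.
    */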
   map->stride = mt->region->pitch * mt->cpp;
   map->ptr = base + y * map->stride + x * mt->cpp;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

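   /* intelEmitCopyBlit() takes pitches in pixels, which is why the
    * temporary's byte stride is divided back down by cpp below.
    */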
   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
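   /* The blit path is only taken for read-only maps (see the dispatch in
    * intel_miptree_map()), so there is never any data to write back.
    */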
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

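      /* intel_offset_S8() maps each linear (x, y) position to the W-tiled
       * byte address inside the stencil BO; the detiling has to happen in
       * software because the BO was allocated untiled (the GTT can't W-fence,
       * see intel_miptree_create()).
       */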
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Include the slice's image offset, mirroring the read side in
             * intel_miptree_map_s8(); without it, writes to any slice other
             * than the first land at the wrong tiled address.
             */
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;
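   /* The packed view handed back to Mesa core is either Z32_FLOAT_X24S8
    * (two dwords per pixel: the float depth value, then stencil in the low
    * byte of the second dword) or S8_Z24 (stencil in bits 31:24, depth in
    * bits 23:0), matching the packing loops below.
    */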

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y);
            /* As with s_offset above, the map's x/y offset must be included;
             * this mirrors z_offset in intel_miptree_map_depthstencil().
             */
            ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

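   /* Pick a mapping strategy: S8 needs the software W-detiling path, real
    * separate stencil needs the depth/stencil packing path, and on gen6+ a
    * read-only map of an uncompressed X-tiled region is staged through a
    * blit into a linear temporary rather than read back through the
    * uncached GTT mapping.  Everything else maps the region directly.
    */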
   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->gen >= 6 &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}