intel: Fix memory leak in intel_miptree_create()
[mesa.git] src/mesa/drivers/dri/intel/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

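/* Collapse the per-face cube map targets to the GL_TEXTURE_CUBE_MAP target
 * that the miptree is keyed on; all other targets pass through unchanged.
 */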
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

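/**
 * Computes the layout for a miptree and allocates everything except the
 * backing region; callers attach a region themselves.  For depth formats
 * that need separate stencil, this also creates the S8 stencil miptree and
 * demotes the depth format accordingly.
 */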
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       */
      assert(intel->has_separate_stencil);
      mt->cpp = 2;
   }

   if (_mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth
       * buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
         mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
         mt->format = MESA_FORMAT_Z32_FLOAT;
         mt->cpp = 4;
      } else {
         _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
                       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}

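/**
 * Creates a miptree and allocates its backing region, picking a tiling mode
 * appropriate for the format and hardware generation.  On any failure the
 * partially constructed tree is released, so NULL is returned without
 * leaking memory.
 */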
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled.  However, we request from the kernel
       * a non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN((height0 + 1) / 2, 64);
      /* For example, a 67x65 stencil buffer is allocated here as a 128x64
       * region; with cpp doubled to 2 in intel_miptree_create_internal(),
       * each allocated row then holds two interleaved W-tiled rows.
       */
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

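/**
 * Wraps an existing region in a single-level miptree, taking a reference on
 * the region rather than allocating a new one.
 */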
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

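/**
 * Convenience wrapper for renderbuffers: a single-level, single-slice 2D
 * miptree of the given format and size.
 */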
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                             width, height, 1, true);

   return mt;
}

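/**
 * Retargets *dst at src: releases whatever *dst previously pointed to, then
 * takes a new reference on src (which may be NULL).
 */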
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}

void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

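/**
 * Returns the image's dimensions in the terms the miptree layout uses: for
 * 1D array textures the GL Height is really the layer count, so it is
 * reported as depth here.
 */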
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}

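/**
 * Records a level's size and position within the miptree and allocates its
 * per-slice offset array; slice 0 starts at the level origin, and the other
 * slices are positioned later via intel_miptree_set_image_offset().
 */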
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

/**
 * For cube map textures, the offset of a face can be requested either
 * through the \c face parameter, or by interpreting the face as a depth
 * layer and using the \c layer parameter; the two options are mutually
 * exclusive.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map.
       * In that case, the caller chose to interpret each cube face as a
       * layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

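/**
 * Copies one 2D slice (level/face/depth) from src_mt to dst_mt, preferring
 * the blitter and falling back to a mapped CPU copy, then recurses into the
 * separate stencil miptree if there is one.
 */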
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

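/**
 * Allocates the HiZ buffer for \c mt as an X8_Z24 miptree with a matching
 * layout, then marks every (level, layer) slice as needing a HiZ resolve.
 * Note that \c mt->hiz_map is an embedded dummy head node, so the first
 * real entry is \c mt->hiz_map.next.
 */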
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = INTEL_NEED_HIZ_RESOLVE;
      }
   }

   return true;
}

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_DEPTH_RESOLVE);
}

typedef void (*resolve_func_t)(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer);

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum intel_need_resolve need,
                            resolve_func_t func)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   func(intel, mt, level, layer);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_HIZ_RESOLVE,
                                      intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_DEPTH_RESOLVE,
                                      intel->vtbl.resolve_depth_slice);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum intel_need_resolve need,
                                 resolve_func_t func)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   /* intel_resolve_map_remove() frees the entry, so grab the next pointer
    * before removing it.
    */
   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;
      func(intel, mt, i->level, i->layer);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_HIZ_RESOLVE,
                                           intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_DEPTH_RESOLVE,
                                           intel->vtbl.resolve_depth_slice);
}

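/**
 * Maps the slice directly through the region, returning a pointer offset to
 * the requested window; for compressed formats the y coordinate is first
 * converted to block rows.
 */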
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);
   /* Note that in the case of cube maps, the caller must have passed the
    * slice number referencing the face.
    */
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   map->stride = mt->region->pitch * mt->cpp;
   map->ptr = base + y * map->stride + x * mt->cpp;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

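/**
 * Maps the slice by blitting it into a freshly allocated linear temporary
 * BO and mapping that instead, presumably to avoid slow readbacks of tiled
 * memory through the GTT; the caller only takes this path for read-only
 * maps.
 */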
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

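/**
 * Maps a W-tiled stencil miptree by detiling it, one byte at a time, into a
 * malloc'd shadow buffer; intel_offset_S8() computes the W-tile address
 * swizzle for each texel.
 */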
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies
    * no INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            /* Offsets must match the map path: include the image offset. */
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   /* The depth format was demoted when the stencil was split out in
    * intel_miptree_create_internal(), so Z32_FLOAT here means the
    * API-visible format is Z32_FLOAT_X24S8.
    */
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies
    * no INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y);
            /* As on the map path, the window offset is part of the depth
             * coordinate too.
             */
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  z_mt->region->pitch +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

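/**
 * Maps a window of one miptree slice for CPU access, dispatching to the
 * stencil-detiling, packed depth/stencil, blit-temporary, or direct GTT
 * path as appropriate, and performs any pending depth resolve first.  The
 * pointer and stride come back in *out_ptr and *out_stride.
 */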
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->gen >= 6 &&
              !(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed &&
              mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}