intel: Make the fake packed depth/stencil mappings use a cached temporary.
[mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

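/**
 * Map a cube map face target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X) to the
 * cube map target itself; all other targets pass through unchanged.
 */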
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

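/**
 * Compute the layout of a miptree (positions of all levels and slices) and
 * create any separate stencil miptree it needs, without allocating the
 * backing region.
 */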
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
                              GLenum target,
                              gl_format format,
                              GLuint first_level,
                              GLuint last_level,
                              GLuint width0,
                              GLuint height0,
                              GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

   intel_get_texture_alignment_unit(intel, format,
                                    &mt->align_w, &mt->align_h);

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       */
      assert(intel->has_separate_stencil);
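      /* With cpp == 2, the pitch computed from the 1-byte-per-pixel width
       * comes out doubled, as required.
       */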
      mt->cpp = 2;
   }

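   /* Lay out the levels and slices using the layout code for this chip
    * generation.
    */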
#ifdef I915
   (void) intel;
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

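   /* On hardware with separate stencil (or when HiZ requires it), a packed
    * depth/stencil miptree is backed by this tree for the depth bits plus a
    * separate MESA_FORMAT_S8 miptree for the stencil bits.
    */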
   if (_mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
        (intel->has_separate_stencil &&
         intel->vtbl.is_hiz_depth_format(intel, format)))) {
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true);
      if (!mt->stencil_mt) {
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   return mt;
}

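/**
 * Create a complete miptree, choosing a tiling mode for the format and
 * allocating the backing region.
 */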
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
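      /* The stencil buffer is W tiled in hardware, but the GTT cannot fence
       * W tiling, so the region stays untiled and intel_offset_S8() applies
       * the W swizzle for CPU access.
       */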
      else if (format == MESA_FORMAT_S8)
         tiling = I915_TILING_NONE;
      else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0);
   /* total_width == 0 || total_height == 0 indicates the null texture. */
   if (!mt || !mt->total_width || !mt->total_height) {
      free(mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
      free(mt);
      return NULL;
   }

   return mt;
}

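/**
 * Wrap an existing region (such as a window-system buffer) in a
 * single-level miptree.
 */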
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
                                GLenum target,
                                gl_format format,
                                struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
                                      0, 0,
                                      region->width, region->height, 1);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t tiling,
                                      uint32_t cpp,
                                      uint32_t width,
                                      uint32_t height)
{
   struct intel_region *region;
   struct intel_mipmap_tree *mt;

   region = intel_region_alloc(intel->intelScreen,
                               tiling, cpp, width, height, true);
   if (!region)
      return NULL;

   mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D, format, region);
   intel_region_release(&region);
   return mt;
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

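/**
 * Return an image's dimensions as the miptree sees them.  For 1D array
 * textures, GL stores the layer count in Height, so it becomes the depth.
 */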
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (image->TexFormat != mt->format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}

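/**
 * Record the position and size of a mipmap level within the tree and
 * allocate its per-slice offset array.
 */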
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

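/**
 * Record the position of one image (depth layer or cube face), given as an
 * offset relative to the start of its mipmap level.
 */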
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

/**
 * For cube map textures, the face to access may be selected either with the
 * \c face parameter or, by interpreting each cube face as a depth layer,
 * with the \c layer parameter.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map.
       * In that case, the caller chose to interpret each cube face as a
       * layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

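/**
 * Copy one slice of one mipmap level between two miptrees, using the
 * blitter when possible and falling back to a CPU copy through mappings.
 */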
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

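/**
 * Allocate the auxiliary HiZ miptree for a depth miptree and mark every
 * (level, layer) slice as needing a HiZ resolve.
 */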
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = INTEL_NEED_HIZ_RESOLVE;
      }
   }

   return true;
}

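/* The hiz_map tracks which (level, layer) slices have a pending HiZ or
 * depth resolve; the functions below queue entries and consume them.
 */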
void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
                         level, layer, INTEL_NEED_DEPTH_RESOLVE);
}

typedef void (*resolve_func_t)(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer);

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum intel_need_resolve need,
                            resolve_func_t func)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
      intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   func(intel, mt, level, layer);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_HIZ_RESOLVE,
                                      intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
                                      INTEL_NEED_DEPTH_RESOLVE,
                                      intel->vtbl.resolve_depth_slice);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 enum intel_need_resolve need,
                                 resolve_func_t func)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   /* Grab the next pointer up front: intel_resolve_map_remove() frees the
    * current element.
    */
   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
         continue;
      func(intel, mt, i->level, i->layer);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_HIZ_RESOLVE,
                                           intel->vtbl.resolve_hiz_slice);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
                                           INTEL_NEED_DEPTH_RESOLVE,
                                           intel->vtbl.resolve_depth_slice);
}

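/**
 * Map a slice by mapping the whole region through the GTT and offsetting to
 * the requested image.
 */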
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);
   /* Note that in the case of cube maps, the caller must have passed the
    * slice number referencing the face.
    */
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   map->stride = mt->region->pitch * mt->cpp;
   map->ptr = base + y * map->stride + x * mt->cpp;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

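/**
 * Map a separate-stencil miptree by detiling the W-tiled stencil data into
 * a malloc'd temporary; intel_miptree_unmap_s8() writes it back.
 */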
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* At least one of READ_BIT and WRITE_BIT is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

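/**
 * Write a mapped S8 temporary back out to the W-tiled stencil buffer and
 * free it.
 */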
static void
intel_miptree_unmap_s8(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      /* Mirror the map path: include the image offset within the region. */
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
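   /* The packed view Mesa sees is 32 bpp: stencil in the top byte over a
    * 24-bit depth value.
    */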
   int packed_bpp = 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* At least one of READ_BIT and WRITE_BIT is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in
    * unless invalidate is set, since we'll be writing the whole rectangle
    * from our temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y);
            ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y);
            /* Mirror the map path: the depth offset must include the mapped
             * rectangle's origin as well as the image offset.
             */
            ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
                                  (x + z_image_x + map->x));
            uint32_t packed = packed_map[y * map->w + x];

            s_map[s_offset] = packed >> 24;
            z_map[z_offset] = packed;
         }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __FUNCTION__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}

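/**
 * Map a single slice of a miptree for CPU access, resolving any pending
 * depth data first and dispatching to the mapping path the format needs.
 */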
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;
}

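/**
 * Unmap a slice, writing back any malloc'd temporary, and release the map.
 */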
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}