i965: Properly demote the depth mt format for fake packed depth/stencil.
[mesa.git] / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_regions.h"
32 #include "intel_resolve_map.h"
33 #include "intel_span.h"
34 #include "intel_tex_layout.h"
35 #include "intel_tex.h"
36 #include "intel_blit.h"
37
38 #include "main/enums.h"
39 #include "main/formats.h"
40 #include "main/image.h"
41 #include "main/teximage.h"
42
43 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
44
45 static GLenum
46 target_to_target(GLenum target)
47 {
48 switch (target) {
49 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
50 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
51 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
52 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
53 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
54 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
55 return GL_TEXTURE_CUBE_MAP_ARB;
56 default:
57 return target;
58 }
59 }
60
61 static struct intel_mipmap_tree *
62 intel_miptree_create_internal(struct intel_context *intel,
63 GLenum target,
64 gl_format format,
65 GLuint first_level,
66 GLuint last_level,
67 GLuint width0,
68 GLuint height0,
69 GLuint depth0)
70 {
71 struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
72 int compress_byte = 0;
73
74 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
75 _mesa_lookup_enum_by_nr(target),
76 _mesa_get_format_name(format),
77 first_level, last_level, mt);
78
79 if (_mesa_is_format_compressed(format))
80 compress_byte = intel_compressed_num_bytes(format);
81
82 mt->target = target_to_target(target);
83 mt->format = format;
84 mt->first_level = first_level;
85 mt->last_level = last_level;
86 mt->width0 = width0;
87 mt->height0 = height0;
88 mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
89 mt->compressed = compress_byte ? 1 : 0;
90 mt->refcount = 1;
91
92 intel_get_texture_alignment_unit(intel, format,
93 &mt->align_w, &mt->align_h);
94
95 if (target == GL_TEXTURE_CUBE_MAP) {
96 assert(depth0 == 1);
97 mt->depth0 = 6;
98 } else {
99 mt->depth0 = depth0;
100 }
101
102 if (format == MESA_FORMAT_S8) {
103 /* The stencil buffer has quirky pitch requirements. From Vol 2a,
104 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
105 * The pitch must be set to 2x the value computed based on width, as
106 * the stencil buffer is stored with two rows interleaved.
107 */
108 assert(intel->has_separate_stencil);
109 mt->cpp = 2;
110 }
111
112 #ifdef I915
113 (void) intel;
114 if (intel->is_945)
115 i945_miptree_layout(mt);
116 else
117 i915_miptree_layout(mt);
118 #else
119 brw_miptree_layout(intel, mt);
120 #endif
121
122 if (_mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
123 (intel->must_use_separate_stencil ||
124 (intel->has_separate_stencil &&
125 intel->vtbl.is_hiz_depth_format(intel, format)))) {
126 mt->stencil_mt = intel_miptree_create(intel,
127 mt->target,
128 MESA_FORMAT_S8,
129 mt->first_level,
130 mt->last_level,
131 mt->width0,
132 mt->height0,
133 mt->depth0,
134 true);
135 if (!mt->stencil_mt) {
136 intel_miptree_release(&mt);
137 return NULL;
138 }
139
140 /* Fix up the Z miptree format for how we're splitting out separate
141 * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
142 */
143 if (mt->format == MESA_FORMAT_S8_Z24) {
144 mt->format = MESA_FORMAT_X8_Z24;
145 } else {
146 _mesa_problem("Unknown format %s in separate stencil\n",
147 _mesa_get_format_name(mt->format));
148 }
149 }
150
151 return mt;
152 }
153
154
155 struct intel_mipmap_tree *
156 intel_miptree_create(struct intel_context *intel,
157 GLenum target,
158 gl_format format,
159 GLuint first_level,
160 GLuint last_level,
161 GLuint width0,
162 GLuint height0,
163 GLuint depth0,
164 bool expect_accelerated_upload)
165 {
166 struct intel_mipmap_tree *mt;
167 uint32_t tiling = I915_TILING_NONE;
168 GLenum base_format = _mesa_get_format_base_format(format);
169
170 if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
171 if (intel->gen >= 4 &&
172 (base_format == GL_DEPTH_COMPONENT ||
173 base_format == GL_DEPTH_STENCIL_EXT))
174 tiling = I915_TILING_Y;
175 else if (width0 >= 64)
176 tiling = I915_TILING_X;
177 }
178
179 if (format == MESA_FORMAT_S8) {
180 /* The stencil buffer is W tiled. However, we request from the kernel a
181 * non-tiled buffer because the GTT is incapable of W fencing.
182 *
183 * The stencil buffer has quirky pitch requirements. From Vol 2a,
184 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
185 * The pitch must be set to 2x the value computed based on width, as
186 * the stencil buffer is stored with two rows interleaved.
187 * To accomplish this, we resort to the nasty hack of doubling the drm
188 * region's cpp and halving its height.
189 *
190 * If we neglect to double the pitch, then render corruption occurs.
191 */
192 tiling = I915_TILING_NONE;
193 width0 = ALIGN(width0, 64);
194 height0 = ALIGN((height0 + 1) / 2, 64);
195 }
196
197 mt = intel_miptree_create_internal(intel, target, format,
198 first_level, last_level, width0,
199 height0, depth0);
200 /*
201 * pitch == 0 || height == 0 indicates the null texture
202 */
203 if (!mt || !mt->total_width || !mt->total_height) {
204 free(mt);
205 return NULL;
206 }
207
208 mt->region = intel_region_alloc(intel->intelScreen,
209 tiling,
210 mt->cpp,
211 mt->total_width,
212 mt->total_height,
213 expect_accelerated_upload);
214
215 if (!mt->region) {
216 free(mt);
217 return NULL;
218 }
219
220 return mt;
221 }
222
223
224 struct intel_mipmap_tree *
225 intel_miptree_create_for_region(struct intel_context *intel,
226 GLenum target,
227 gl_format format,
228 struct intel_region *region)
229 {
230 struct intel_mipmap_tree *mt;
231
232 mt = intel_miptree_create_internal(intel, target, format,
233 0, 0,
234 region->width, region->height, 1);
235 if (!mt)
236 return mt;
237
238 intel_region_reference(&mt->region, region);
239
240 return mt;
241 }
242
243 struct intel_mipmap_tree*
244 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
245 gl_format format,
246 uint32_t width,
247 uint32_t height)
248 {
249 struct intel_mipmap_tree *mt;
250
251 mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
252 width, height, 1, true);
253
254 return mt;
255 }
256
257 void
258 intel_miptree_reference(struct intel_mipmap_tree **dst,
259 struct intel_mipmap_tree *src)
260 {
261 if (*dst == src)
262 return;
263
264 intel_miptree_release(dst);
265
266 if (src) {
267 src->refcount++;
268 DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
269 }
270
271 *dst = src;
272 }
273
274
/**
 * Drop a reference on *mt and NULL out the pointer.  When the refcount
 * reaches zero, the region, the separate stencil and HiZ miptrees (released
 * recursively), the HiZ resolve map, and the per-level slice arrays are all
 * freed.  A NULL *mt is a no-op.
 */
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      /* stencil_mt and hiz_mt are themselves miptrees; releasing them here
       * recurses (at most one level deep in practice).
       */
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      /* free(NULL) is fine for levels whose slice array was never set. */
      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
	 free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   /* The caller's pointer is cleared even if other references remain. */
   *mt = NULL;
}
300
301 void
302 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
303 int *width, int *height, int *depth)
304 {
305 switch (image->TexObject->Target) {
306 case GL_TEXTURE_1D_ARRAY:
307 *width = image->Width;
308 *height = 1;
309 *depth = image->Height;
310 break;
311 default:
312 *width = image->Width;
313 *height = image->Height;
314 *depth = image->Depth;
315 break;
316 }
317 }
318
319 /**
320 * Can the image be pulled into a unified mipmap tree? This mirrors
321 * the completeness test in a lot of ways.
322 *
323 * Not sure whether I want to pass gl_texture_image here.
324 */
325 bool
326 intel_miptree_match_image(struct intel_mipmap_tree *mt,
327 struct gl_texture_image *image)
328 {
329 struct intel_texture_image *intelImage = intel_texture_image(image);
330 GLuint level = intelImage->base.Base.Level;
331 int width, height, depth;
332
333 if (image->TexFormat != mt->format &&
334 !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
335 mt->format == MESA_FORMAT_X8_Z24 &&
336 mt->stencil_mt)) {
337 return false;
338 }
339
340 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
341
342 /* Test image dimensions against the base level image adjusted for
343 * minification. This will also catch images not present in the
344 * tree, changed targets, etc.
345 */
346 if (width != mt->level[level].width ||
347 height != mt->level[level].height ||
348 depth != mt->level[level].depth)
349 return false;
350
351 return true;
352 }
353
354
/**
 * Record the size and position of mipmap level \c level within the tree,
 * and allocate its slice-offset array (one entry per depth slice, all
 * initialized to the level's own origin for slice 0).
 *
 * Must be called exactly once per level: asserts the slice array has not
 * already been set.
 */
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
			     GLuint level,
			     GLuint x, GLuint y,
			     GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   /* level_x/level_y are the level's origin within the 2D layout. */
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   /* NOTE(review): calloc() result is unchecked and slice[0] is dereferenced
    * immediately below — this would crash on allocation failure.
    */
   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
376
377
378 void
379 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
380 GLuint level, GLuint img,
381 GLuint x, GLuint y)
382 {
383 if (img == 0 && level == 0)
384 assert(x == 0 && y == 0);
385
386 assert(img < mt->level[level].depth);
387
388 mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
389 mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
390
391 DBG("%s level %d img %d pos %d,%d\n",
392 __FUNCTION__, level, img,
393 mt->level[level].slice[img].x_offset,
394 mt->level[level].slice[img].y_offset);
395 }
396
397
398 /**
399 * For cube map textures, either the \c face parameter can be used, of course,
400 * or the cube face can be interpreted as a depth layer and the \c layer
401 * parameter used.
402 */
403 void
404 intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
405 GLuint level, GLuint face, GLuint layer,
406 GLuint *x, GLuint *y)
407 {
408 int slice;
409
410 if (face > 0) {
411 assert(mt->target == GL_TEXTURE_CUBE_MAP);
412 assert(face < 6);
413 assert(layer == 0);
414 slice = face;
415 } else {
416 /* This branch may be taken even if the texture target is a cube map. In
417 * that case, the caller chose to interpret each cube face as a layer.
418 */
419 assert(face == 0);
420 slice = layer;
421 }
422
423 *x = mt->level[level].slice[slice].x_offset;
424 *y = mt->level[level].slice[slice].y_offset;
425 }
426
/**
 * Copy one slice of one level from src_mt to dst_mt.
 *
 * Tries the blitter first; on failure, falls back to mapping both regions
 * and copying with the CPU.  If the source has a separate stencil miptree,
 * the corresponding stencil slice is copied recursively.
 */
static void
intel_miptree_copy_slice(struct intel_context *intel,
			 struct intel_mipmap_tree *dst_mt,
			 struct intel_mipmap_tree *src_mt,
			 int level,
			 int face,
			 int depth)

{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      /* For compressed formats, convert pixel dimensions into block
       * dimensions: height in block rows, width padded to a block multiple.
       */
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
				  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
				  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
			  dst_mt->region->cpp,
			  src_mt->region->pitch, src_mt->region->bo,
			  0, src_mt->region->tiling,
			  dst_mt->region->pitch, dst_mt->region->bo,
			  0, dst_mt->region->tiling,
			  src_x, src_y,
			  dst_x, dst_y,
			  width, height,
			  GL_COPY)) {

      /* Blit failed (e.g. unsupported pitch/tiling combination); fall back
       * to a CPU copy through GTT maps of both regions.
       */
      fallback_debug("miptree validate blit for %s failed\n",
		     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
		      dst_mt->cpp,
		      dst_mt->region->pitch,
		      dst_x, dst_y,
		      width, height,
		      src, src_mt->region->pitch,
		      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   /* Separate stencil data lives in its own miptree; copy it too. */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}
492
493 /**
494 * Copies the image's current data to the given miptree, and associates that
495 * miptree with the image.
496 */
497 void
498 intel_miptree_copy_teximage(struct intel_context *intel,
499 struct intel_texture_image *intelImage,
500 struct intel_mipmap_tree *dst_mt)
501 {
502 struct intel_mipmap_tree *src_mt = intelImage->mt;
503 int level = intelImage->base.Base.Level;
504 int face = intelImage->base.Base.Face;
505 GLuint depth = intelImage->base.Base.Depth;
506
507 for (int slice = 0; slice < depth; slice++) {
508 intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
509 }
510
511 intel_miptree_reference(&intelImage->mt, dst_mt);
512 }
513
/**
 * Allocate a HiZ miptree shadowing \c mt and mark every slice of every
 * level as needing a HiZ resolve.
 *
 * Returns false if the HiZ miptree could not be created.
 */
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
			struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   /* The HiZ buffer is sized like the depth buffer; X8_Z24 is used purely
    * to get a matching layout.
    */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
	 /* NOTE(review): malloc() result is unchecked; an OOM here would
	  * crash on the dereferences below.
	  */
	 head->next = malloc(sizeof(*head->next));
	 head->next->prev = head;
	 head->next->next = NULL;
	 head = head->next;

	 head->level = level;
	 head->layer = layer;
	 head->need = INTEL_NEED_HIZ_RESOLVE;
      }
   }

   return true;
}
549
550 void
551 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
552 uint32_t level,
553 uint32_t layer)
554 {
555 intel_miptree_check_level_layer(mt, level, layer);
556
557 if (!mt->hiz_mt)
558 return;
559
560 intel_resolve_map_set(&mt->hiz_map,
561 level, layer, INTEL_NEED_HIZ_RESOLVE);
562 }
563
564
565 void
566 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
567 uint32_t level,
568 uint32_t layer)
569 {
570 intel_miptree_check_level_layer(mt, level, layer);
571
572 if (!mt->hiz_mt)
573 return;
574
575 intel_resolve_map_set(&mt->hiz_map,
576 level, layer, INTEL_NEED_DEPTH_RESOLVE);
577 }
578
579 typedef void (*resolve_func_t)(struct intel_context *intel,
580 struct intel_mipmap_tree *mt,
581 uint32_t level,
582 uint32_t layer);
583
584 static bool
585 intel_miptree_slice_resolve(struct intel_context *intel,
586 struct intel_mipmap_tree *mt,
587 uint32_t level,
588 uint32_t layer,
589 enum intel_need_resolve need,
590 resolve_func_t func)
591 {
592 intel_miptree_check_level_layer(mt, level, layer);
593
594 struct intel_resolve_map *item =
595 intel_resolve_map_get(&mt->hiz_map, level, layer);
596
597 if (!item || item->need != need)
598 return false;
599
600 func(intel, mt, level, layer);
601 intel_resolve_map_remove(item);
602 return true;
603 }
604
605 bool
606 intel_miptree_slice_resolve_hiz(struct intel_context *intel,
607 struct intel_mipmap_tree *mt,
608 uint32_t level,
609 uint32_t layer)
610 {
611 return intel_miptree_slice_resolve(intel, mt, level, layer,
612 INTEL_NEED_HIZ_RESOLVE,
613 intel->vtbl.resolve_hiz_slice);
614 }
615
616 bool
617 intel_miptree_slice_resolve_depth(struct intel_context *intel,
618 struct intel_mipmap_tree *mt,
619 uint32_t level,
620 uint32_t layer)
621 {
622 return intel_miptree_slice_resolve(intel, mt, level, layer,
623 INTEL_NEED_DEPTH_RESOLVE,
624 intel->vtbl.resolve_depth_slice);
625 }
626
627 static bool
628 intel_miptree_all_slices_resolve(struct intel_context *intel,
629 struct intel_mipmap_tree *mt,
630 enum intel_need_resolve need,
631 resolve_func_t func)
632 {
633 bool did_resolve = false;
634 struct intel_resolve_map *i;
635
636 for (i = mt->hiz_map.next; i; i = i->next) {
637 if (i->need != need)
638 continue;
639 func(intel, mt, i->level, i->layer);
640 intel_resolve_map_remove(i);
641 did_resolve = true;
642 }
643
644 return did_resolve;
645 }
646
647 bool
648 intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
649 struct intel_mipmap_tree *mt)
650 {
651 return intel_miptree_all_slices_resolve(intel, mt,
652 INTEL_NEED_HIZ_RESOLVE,
653 intel->vtbl.resolve_hiz_slice);
654 }
655
656 bool
657 intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
658 struct intel_mipmap_tree *mt)
659 {
660 return intel_miptree_all_slices_resolve(intel, mt,
661 INTEL_NEED_DEPTH_RESOLVE,
662 intel->vtbl.resolve_depth_slice);
663 }
664
/**
 * Map a slice directly through the GTT: maps the whole region and points
 * map->ptr at the requested rectangle of the requested slice.
 */
static void
intel_miptree_map_gtt(struct intel_context *intel,
		      struct intel_mipmap_tree *mt,
		      struct intel_miptree_map *map,
		      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);
   /* Note that in the case of cube maps, the caller must have passed the slice
    * number referencing the face.
    */
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   /* region->pitch is in pixels; map->stride is in bytes. */
   map->stride = mt->region->pitch * mt->cpp;
   map->ptr = base + y * map->stride + x * mt->cpp;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
701
702 static void
703 intel_miptree_unmap_gtt(struct intel_context *intel,
704 struct intel_mipmap_tree *mt,
705 struct intel_miptree_map *map,
706 unsigned int level,
707 unsigned int slice)
708 {
709 intel_region_unmap(intel, mt->region);
710 }
711
/**
 * Map a slice by blitting it into a temporary linear BO and mapping that.
 *
 * Used for read-only maps of tiled surfaces, where a blit plus linear map
 * avoids slow uncached GTT reads.  On any failure, map->ptr is left NULL
 * (the caller's map simply yields no data).
 */
static void
intel_miptree_map_blit(struct intel_context *intel,
		       struct intel_mipmap_tree *mt,
		       struct intel_miptree_map *map,
		       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
				map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   /* Blit the requested rectangle into the linear temporary at (0, 0). */
   if (!intelEmitCopyBlit(intel,
			  mt->region->cpp,
			  mt->region->pitch, mt->region->bo,
			  0, mt->region->tiling,
			  map->stride / mt->region->cpp, map->bo,
			  0, I915_TILING_NONE,
			  x, y,
			  0, 0,
			  map->w, map->h,
			  GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   /* The blit must land before the CPU reads the temporary. */
   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   /* drm_intel_bo_unreference() tolerates NULL, so this covers all paths. */
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}
772
773 static void
774 intel_miptree_unmap_blit(struct intel_context *intel,
775 struct intel_mipmap_tree *mt,
776 struct intel_miptree_map *map,
777 unsigned int level,
778 unsigned int slice)
779 {
780 assert(!(map->mode & GL_MAP_WRITE_BIT));
781
782 drm_intel_bo_unmap(map->bo);
783 drm_intel_bo_unreference(map->bo);
784 }
785
/**
 * Map a W-tiled stencil (S8) slice through a malloc'ed linear temporary.
 *
 * The GTT cannot fence W tiling, so the detiling is done by hand with
 * intel_offset_S8().  On malloc failure map->ptr is left NULL.
 */
static void
intel_miptree_map_s8(struct intel_context *intel,
		     struct intel_mipmap_tree *mt,
		     struct intel_miptree_map *map,
		     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
					       GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      /* Detile byte by byte: intel_offset_S8() computes each pixel's
       * position within the W-tiled layout.
       */
      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
	                                       x + image_x + map->x,
	                                       y + image_y + map->y);
	    untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
	 }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->ptr, map->stride);
   }
}
830
831 static void
832 intel_miptree_unmap_s8(struct intel_context *intel,
833 struct intel_mipmap_tree *mt,
834 struct intel_miptree_map *map,
835 unsigned int level,
836 unsigned int slice)
837 {
838 if (map->mode & GL_MAP_WRITE_BIT) {
839 unsigned int image_x, image_y;
840 uint8_t *untiled_s8_map = map->ptr;
841 uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
842
843 intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
844
845 for (uint32_t y = 0; y < map->h; y++) {
846 for (uint32_t x = 0; x < map->w; x++) {
847 ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
848 x + map->x,
849 y + map->y);
850 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
851 }
852 }
853
854 intel_region_unmap(intel, mt->region);
855 }
856
857 free(map->buffer);
858 }
859
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
			       struct intel_mipmap_tree *mt,
			       struct intel_miptree_map *map,
			       unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   /* The fake packed format presented to Mesa is 4 bytes/pixel (S8_Z24). */
   int packed_bpp = 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
				     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
				     &z_image_x, &z_image_y);

      /* Gather each pixel: stencil from the W-tiled S8 buffer (via
       * intel_offset_S8), depth from the linear-indexed Z buffer, packed as
       * S8 in the top byte over Z24.
       */
      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    int map_x = map->x + x, map_y = map->y + y;
	    ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
						 map_x + s_image_x,
						 map_y + s_image_y);
	    ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
				  (map_x + z_image_x));
	    uint8_t s = s_map[s_offset];
	    uint32_t z = z_map[z_offset];

	    packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
	 }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
	  __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  z_mt, map->x + z_image_x, map->y + z_image_y,
	  s_mt, map->x + s_image_x, map->y + s_image_y,
	  map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->ptr, map->stride);
   }
}
933
934 static void
935 intel_miptree_unmap_depthstencil(struct intel_context *intel,
936 struct intel_mipmap_tree *mt,
937 struct intel_miptree_map *map,
938 unsigned int level,
939 unsigned int slice)
940 {
941 struct intel_mipmap_tree *z_mt = mt;
942 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
943
944 if (map->mode & GL_MAP_WRITE_BIT) {
945 uint32_t *packed_map = map->ptr;
946 uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
947 uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
948 unsigned int s_image_x, s_image_y;
949 unsigned int z_image_x, z_image_y;
950
951 intel_miptree_get_image_offset(s_mt, level, 0, slice,
952 &s_image_x, &s_image_y);
953 intel_miptree_get_image_offset(z_mt, level, 0, slice,
954 &z_image_x, &z_image_y);
955
956 for (uint32_t y = 0; y < map->h; y++) {
957 for (uint32_t x = 0; x < map->w; x++) {
958 ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
959 x + s_image_x + map->x,
960 y + s_image_y + map->y);
961 ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
962 (x + z_image_x));
963 uint32_t packed = packed_map[y * map->w + x];
964
965 s_map[s_offset] = packed >> 24;
966 z_map[z_offset] = packed;
967 }
968 }
969
970 intel_region_unmap(intel, s_mt->region);
971 intel_region_unmap(intel, z_mt->region);
972
973 DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
974 __FUNCTION__,
975 map->x, map->y, map->w, map->h,
976 z_mt, _mesa_get_format_name(z_mt->format),
977 map->x + z_image_x, map->y + z_image_y,
978 s_mt, map->x + s_image_x, map->y + s_image_y,
979 map->ptr, map->stride);
980 }
981
982 free(map->buffer);
983 }
984
/**
 * Map a rectangle of one slice of one level for CPU access.
 *
 * Chooses a mapping strategy: separate-stencil S8 and fake-packed
 * depth/stencil use detiling temporaries; read-only X-tiled surfaces on
 * gen6+ go through a blit temporary; everything else maps the GTT directly.
 * Returns the pointer and stride through out parameters; *out_ptr is NULL
 * on failure.  Pair with intel_miptree_unmap().
 */
void
intel_miptree_map(struct intel_context *intel,
		  struct intel_mipmap_tree *mt,
		  unsigned int level,
		  unsigned int slice,
		  unsigned int x,
		  unsigned int y,
		  unsigned int w,
		  unsigned int h,
		  GLbitfield mode,
		  void **out_ptr,
		  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map){
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* Only one outstanding map per slice is supported. */
   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   /* The CPU must see up-to-date depth data, so resolve first; a write
    * makes the HiZ buffer stale, so flag it for a later HiZ resolve.
    */
   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->gen >= 6 &&
	      !(mode & GL_MAP_WRITE_BIT) &&
	      !mt->compressed &&
	      mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;
}
1036
1037 void
1038 intel_miptree_unmap(struct intel_context *intel,
1039 struct intel_mipmap_tree *mt,
1040 unsigned int level,
1041 unsigned int slice)
1042 {
1043 struct intel_miptree_map *map = mt->level[level].slice[slice].map;
1044
1045 if (!map)
1046 return;
1047
1048 DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
1049 mt, _mesa_get_format_name(mt->format), level, slice);
1050
1051 if (mt->format == MESA_FORMAT_S8) {
1052 intel_miptree_unmap_s8(intel, mt, map, level, slice);
1053 } else if (mt->stencil_mt) {
1054 intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
1055 } else if (map->bo) {
1056 intel_miptree_unmap_blit(intel, mt, map, level, slice);
1057 } else {
1058 intel_miptree_unmap_gtt(intel, mt, map, level, slice);
1059 }
1060
1061 mt->level[level].slice[slice].map = NULL;
1062 free(map);
1063 }