/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

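/**
 * Map a cube map face target (GL_TEXTURE_CUBE_MAP_POSITIVE_X, etc.) to
 * GL_TEXTURE_CUBE_MAP; all other targets are passed through unchanged.
 */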
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

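/**
 * Allocate an intel_mipmap_tree and lay out its mipmap levels, without
 * allocating any backing storage.  Callers are expected to attach a region
 * afterwards (as intel_miptree_create() and intel_miptree_create_for_bo()
 * do).
 */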
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
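   /* For example (using MESA_FORMAT_RGB_DXT1 purely as an illustration):
    * its blocks are 4x4 pixels and 8 bytes each, so bw == 4 and
    * cpp == 8 / 4 == 2 bytes per (1, 4)-sized block column.
    */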
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            mesa_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
   return I915_TILING_X;
}

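/**
 * Create a miptree backed by a freshly allocated region, with tiling chosen
 * by intel_miptree_choose_tiling().  A Y-tiled region that is too large to
 * map through the aperture is re-allocated X-tiled so the BLT engine can
 * still address it.
 */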
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0);

   /* total_width == 0 || total_height == 0 indicates the null texture */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle
    * Y-tiling, so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

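/**
 * Wrap an existing BO in a single-level 2D miptree at the given offset.
 * The miptree takes its own reference on \p bo, so the caller's reference
 * is unaffected.
 */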
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1);
   if (!mt) {
      free(region);
      return mt;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}

/**
 * Wraps the given DRI2 buffer's region with a miptree.
 *
 * (Only singlesample DRI2 buffers are shared with this driver, so there is
 * no multisample case to handle here.)
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     mesa_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

/**
 * Wraps the given image loader buffer's region with a miptree.
 *
 * (Only singlesample image buffers are supported here; the \p num_samples
 * argument is currently unused.)
 */
struct intel_mipmap_tree *
intel_miptree_create_for_image_buffer(struct intel_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      mesa_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   return mt;
}

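/**
 * Create a single-level 2D miptree sized for a renderbuffer, letting
 * intel_miptree_create() pick whatever tiling mode suits it best.
 */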
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

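/**
 * Make *dst point at src, adjusting refcounts: any miptree *dst previously
 * pointed at is released, and src (if non-NULL) gains a reference.
 */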
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}

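/**
 * Drop a reference to *mt.  When the refcount reaches zero, the miptree's
 * region is released and its per-level slice arrays are freed.  *mt is set
 * to NULL in all cases.
 */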
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

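/**
 * Return the image's dimensions in the layout the miptree code uses: for
 * GL_TEXTURE_1D_ARRAY the GL "height" is really the slice count, so it is
 * reported as depth with height forced to 1.
 */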
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* chooses the texture object based on the target passed in,
    * and objects can't change targets over their lifetimes, so this should
    * be true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   mesa_format mt_format = mt->format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}

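/**
 * Record the size and position of a mipmap level within the tree, and
 * allocate its per-slice offset array.  Used by the layout functions while
 * building the tree.
 */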
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __func__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

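/**
 * Set the position of a single slice (image) of a level, as an x/y offset
 * relative to that level's origin within the region.
 */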
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

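/**
 * Return the x/y position of the given slice within the region, in pixels
 * (or in blocks, for compressed formats).
 */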
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}

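/**
 * CPU fallback for copying one slice of a level between two miptrees:
 * maps both, then copies row by row (or with one large memcpy when both
 * strides exactly match the row size).
 */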
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

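/**
 * Copy one face/slice of a mipmap level from src_mt to dst_mt, trying the
 * blitter first and falling back to the software path if the blit fails.
 */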
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

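/**
 * Map the miptree's entire BO for CPU access and return a pointer to it.
 * Flushes any pending batch first; tiled buffers are mapped through the
 * GTT so the aperture handles detiling.
 */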
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

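/**
 * Unmap a mapping created by intel_miptree_map_raw().
 */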
void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

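/**
 * Direct mapping path: maps the whole BO and computes a pointer to the
 * requested rectangle of (level, slice) within it.
 */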
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}

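/**
 * Indirect mapping path: blits the requested rectangle into a temporary
 * untiled miptree and maps that instead.  Used when mapping the real BO
 * directly would require pulling an over-aperture-sized object into the
 * GTT.
 */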
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

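/**
 * Tear down a blit mapping: if the map was for writing, blit the temporary
 * miptree's contents back into the real one, then release the temporary.
 */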
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

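/**
 * Map a rectangle of (level, slice) for CPU access, returning the pointer
 * and stride through \p out_ptr and \p out_stride.  Uses a blit to a linear
 * temporary when a direct GTT map of a tiled BO would exceed the mappable
 * aperture; otherwise maps the BO directly.
 */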
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

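/**
 * Unmap a mapping created by intel_miptree_map(), writing data back via the
 * blit path if a blit temporary was used, and release the map.
 */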
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}