dri: add __DRIimageLoaderExtension and __DRIimageDriverExtension
[mesa.git] / src / mesa / drivers / dri / i915 / intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo().  If true, then do not create
 *        \c stencil_mt.  (i915 has no separate stencil miptree, so the
 *        flag currently has no effect here.)
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
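   /* For example, DXT1 uses 4x4 blocks of 8 bytes, so bw = bh = 4 and the
    * per-block byte count is 8; cpp then works out to 8 / 4 = 2, the bytes
    * covering a one-pixel-wide, one-block-tall column.
    */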
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

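   /* Cube maps are laid out as six consecutive 2D faces, so the physical
    * depth becomes 6 even though the logical depth stays 1.
    */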
   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            gl_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

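   /* The 2D blitter cannot handle pitches of 32k bytes or more (see
    * intel_miptree_blit()).  An X-tiled pitch is a whole number of 512-byte
    * tile rows, hence the ALIGN before the comparison.
    */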
   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
   return I915_TILING_X;
}

struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
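   /* choose_tiling() could in principle report that both tilings are
    * acceptable by returning the mask I915_TILING_Y | I915_TILING_X; in that
    * case we try Y first and fall back to X below if the BO turns out to be
    * too large to map.
    */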
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle
    * Y-tiling, so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            gl_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1,
                                    true);
   if (!mt) {
      free(region);
      return NULL;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}

/**
 * Wraps the region of a DRI2 buffer in a single-level miptree.
 *
 * i915 hardware has no multisampling, so unlike the i965 counterpart there
 * is no multisample wrapping to do here; the region is used as-is.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

/**
 * Wraps the region of an image buffer in a single-level miptree.
 *
 * As with the DRI2 path above, i915 has no multisampling, so num_samples is
 * effectively ignored and the region is used as-is.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_image_buffer(struct intel_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      gl_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   return mt;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

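/* A quick sketch of the reference-counting contract implemented below:
 *
 *    struct intel_mipmap_tree *ref = NULL;
 *    intel_miptree_reference(&ref, mt);   // bumps mt->refcount, stores mt
 *    ...use ref...
 *    intel_miptree_release(&ref);         // drops the count and NULLs ref;
 *                                         // the tree is freed at zero
 */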
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
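      /* 1D array textures store their layer count in Height, so report it
       * as depth and force the height to 1.
       */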
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}

static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;
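
   /* A slice index addresses both cube faces and array layers: callers pass
    * a nonzero face for cube maps or a nonzero depth for array textures,
    * never both at once.
    */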
   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

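   /* Compressed formats are copied a block at a time, so convert the height
    * to block rows and round the width up to a whole number of blocks.
    */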
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

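   /* Tiled buffers must go through a fenced GTT mapping so the hardware
    * detiles on access; linear buffers can use a plain CPU map.
    */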
   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt);

   if (base == NULL)
      map->ptr = NULL;
   else {
      base += mt->offset;

      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}

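/* Maps a (level, slice) rectangle by blitting it into a freshly allocated
 * linear temporary and CPU-mapping that instead.  Used when a direct GTT
 * map of the original BO would be too expensive or impossible.
 */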
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(intel, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

/**
 * Create and attach a map to the miptree at (level, slice).  Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

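/* A minimal usage sketch for the map/unmap pair below (error handling
 * omitted; a NULL *out_ptr means the map failed):
 *
 *    void *ptr;
 *    int stride;
 *    intel_miptree_map(intel, mt, level, slice, x, y, w, h,
 *                      GL_MAP_WRITE_BIT, &ptr, &stride);
 *    if (ptr) {
 *       ... write w x h texels, advancing by stride bytes per row ...
 *       intel_miptree_unmap(intel, mt, level, slice);
 *    }
 */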
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}