i915: Remove separate stencil code.
[mesa.git] / src / mesa / drivers / dri / i915 / intel_mipmap_tree.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <GL/gl.h>
29 #include <GL/internal/dri_interface.h>
30
31 #include "intel_batchbuffer.h"
32 #include "intel_chipset.h"
33 #include "intel_context.h"
34 #include "intel_mipmap_tree.h"
35 #include "intel_regions.h"
36 #include "intel_tex_layout.h"
37 #include "intel_tex.h"
38 #include "intel_blit.h"
39
40 #include "main/enums.h"
41 #include "main/formats.h"
42 #include "main/glformats.h"
43 #include "main/texcompress_etc.h"
44 #include "main/teximage.h"
45
46 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
47
48 static GLenum
49 target_to_target(GLenum target)
50 {
51 switch (target) {
52 case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
53 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
54 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
55 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
56 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
57 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
58 return GL_TEXTURE_CUBE_MAP_ARB;
59 default:
60 return target;
61 }
62 }
63
64 /**
65 * @param for_bo Indicates that the caller is
66 * intel_miptree_create_for_bo(). If true, then do not create
67 * \c stencil_mt.
68 */
69 struct intel_mipmap_tree *
70 intel_miptree_create_layout(struct intel_context *intel,
71 GLenum target,
72 gl_format format,
73 GLuint first_level,
74 GLuint last_level,
75 GLuint width0,
76 GLuint height0,
77 GLuint depth0,
78 bool for_bo)
79 {
80 struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
81 if (!mt)
82 return NULL;
83
84 DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
85 _mesa_lookup_enum_by_nr(target),
86 _mesa_get_format_name(format),
87 first_level, last_level, mt);
88
89 mt->target = target_to_target(target);
90 mt->format = format;
91 mt->first_level = first_level;
92 mt->last_level = last_level;
93 mt->logical_width0 = width0;
94 mt->logical_height0 = height0;
95 mt->logical_depth0 = depth0;
96
97 /* The cpp is bytes per (1, blockheight)-sized block for compressed
98 * textures. This is why you'll see divides by blockheight all over
99 */
100 unsigned bw, bh;
101 _mesa_get_format_block_size(format, &bw, &bh);
102 assert(_mesa_get_format_bytes(mt->format) % bw == 0);
103 mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
104
105 mt->compressed = _mesa_is_format_compressed(format);
106 mt->refcount = 1;
107
108 if (target == GL_TEXTURE_CUBE_MAP) {
109 assert(depth0 == 1);
110 depth0 = 6;
111 }
112
113 mt->physical_width0 = width0;
114 mt->physical_height0 = height0;
115 mt->physical_depth0 = depth0;
116
117 intel_get_texture_alignment_unit(intel, mt->format,
118 &mt->align_w, &mt->align_h);
119
120 (void) intel;
121 if (intel->is_945)
122 i945_miptree_layout(mt);
123 else
124 i915_miptree_layout(mt);
125
126 return mt;
127 }
128
129 /**
130 * \brief Helper function for intel_miptree_create().
131 */
132 static uint32_t
133 intel_miptree_choose_tiling(struct intel_context *intel,
134 gl_format format,
135 uint32_t width0,
136 enum intel_miptree_tiling_mode requested,
137 struct intel_mipmap_tree *mt)
138 {
139 /* Some usages may want only one type of tiling, like depth miptrees (Y
140 * tiled), or temporary BOs for uploading data once (linear).
141 */
142 switch (requested) {
143 case INTEL_MIPTREE_TILING_ANY:
144 break;
145 case INTEL_MIPTREE_TILING_Y:
146 return I915_TILING_Y;
147 case INTEL_MIPTREE_TILING_NONE:
148 return I915_TILING_NONE;
149 }
150
151 GLenum base_format = _mesa_get_format_base_format(format);
152 if (intel->gen >= 4 &&
153 (base_format == GL_DEPTH_COMPONENT ||
154 base_format == GL_DEPTH_STENCIL_EXT))
155 return I915_TILING_Y;
156
157 int minimum_pitch = mt->total_width * mt->cpp;
158
159 /* If the width is much smaller than a tile, don't bother tiling. */
160 if (minimum_pitch < 64)
161 return I915_TILING_NONE;
162
163 if (ALIGN(minimum_pitch, 512) >= 32768) {
164 perf_debug("%dx%d miptree too large to blit, falling back to untiled",
165 mt->total_width, mt->total_height);
166 return I915_TILING_NONE;
167 }
168
169 /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
170 if (intel->gen < 6)
171 return I915_TILING_X;
172
173 return I915_TILING_Y | I915_TILING_X;
174 }
175
/**
 * Create a miptree backed by a freshly-allocated region.
 *
 * On hardware other than Baytrail, ETC1/ETC2 formats are stored in an
 * uncompressed shadow format; mt->etc_format remembers the original
 * format so the data can be unpacked at map time (see
 * intel_miptree_unmap_etc()).
 *
 * Returns NULL on layout or allocation failure.
 */
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   gl_format tex_format = format;
   gl_format etc_format = MESA_FORMAT_NONE;
   GLuint total_width, total_height;

   /* Translate ETC formats to an uncompressed storage format the
    * hardware can sample from.
    */
   if (!intel->is_baytrail) {
      switch (format) {
      case MESA_FORMAT_ETC1_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_RGB8:
         format = MESA_FORMAT_RGBX8888_REV;
         break;
      case MESA_FORMAT_ETC2_SRGB8:
      case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
      case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_SARGB8;
         break;
      case MESA_FORMAT_ETC2_RGBA8_EAC:
      case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
         format = MESA_FORMAT_RGBA8888_REV;
         break;
      case MESA_FORMAT_ETC2_R11_EAC:
         format = MESA_FORMAT_R16;
         break;
      case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
         format = MESA_FORMAT_SIGNED_R16;
         break;
      case MESA_FORMAT_ETC2_RG11_EAC:
         format = MESA_FORMAT_GR1616;
         break;
      case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
         format = MESA_FORMAT_SIGNED_GR1616;
         break;
      default:
         /* Non ETC1 / ETC2 format */
         break;
      }
   }

   /* Remember the original ETC format only if we actually translated. */
   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   /* "Y or X" means: try Y first, fall back to X if the BO is too big. */
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->etc_format = etc_format;
   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
284
285 struct intel_mipmap_tree *
286 intel_miptree_create_for_bo(struct intel_context *intel,
287 drm_intel_bo *bo,
288 gl_format format,
289 uint32_t offset,
290 uint32_t width,
291 uint32_t height,
292 int pitch,
293 uint32_t tiling)
294 {
295 struct intel_mipmap_tree *mt;
296
297 struct intel_region *region = calloc(1, sizeof(*region));
298 if (!region)
299 return NULL;
300
301 /* Nothing will be able to use this miptree with the BO if the offset isn't
302 * aligned.
303 */
304 if (tiling != I915_TILING_NONE)
305 assert(offset % 4096 == 0);
306
307 /* miptrees can't handle negative pitch. If you need flipping of images,
308 * that's outside of the scope of the mt.
309 */
310 assert(pitch >= 0);
311
312 mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
313 0, 0,
314 width, height, 1,
315 true);
316 if (!mt)
317 return mt;
318
319 region->cpp = mt->cpp;
320 region->width = width;
321 region->height = height;
322 region->pitch = pitch;
323 region->refcount = 1;
324 drm_intel_bo_reference(bo);
325 region->bo = bo;
326 region->tiling = tiling;
327
328 mt->region = region;
329 mt->offset = offset;
330
331 return mt;
332 }
333

/**
 * Wrap a DRI2 buffer's region with a miptree.
 *
 * This driver has no multisample path here: the given region is simply
 * wrapped in a single-level miptree via intel_miptree_create_for_bo().
 * (An earlier version of this comment described embedding a singlesample
 * miptree inside a multisample one; no such code exists below.)
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   /* Preserve the flink name so the buffer can be identified later. */
   mt->region->name = region->name;

   return mt;
}
373
374 struct intel_mipmap_tree*
375 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
376 gl_format format,
377 uint32_t width,
378 uint32_t height)
379 {
380 uint32_t depth = 1;
381
382 return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
383 width, height, depth, true,
384 INTEL_MIPTREE_TILING_ANY);
385 }
386
387 void
388 intel_miptree_reference(struct intel_mipmap_tree **dst,
389 struct intel_mipmap_tree *src)
390 {
391 if (*dst == src)
392 return;
393
394 intel_miptree_release(dst);
395
396 if (src) {
397 src->refcount++;
398 DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
399 }
400
401 *dst = src;
402 }
403
404
405 void
406 intel_miptree_release(struct intel_mipmap_tree **mt)
407 {
408 if (!*mt)
409 return;
410
411 DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
412 if (--(*mt)->refcount <= 0) {
413 GLuint i;
414
415 DBG("%s deleting %p\n", __FUNCTION__, *mt);
416
417 intel_region_release(&((*mt)->region));
418
419 for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
420 free((*mt)->level[i].slice);
421 }
422
423 free(*mt);
424 }
425 *mt = NULL;
426 }
427
428 void
429 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
430 int *width, int *height, int *depth)
431 {
432 switch (image->TexObject->Target) {
433 case GL_TEXTURE_1D_ARRAY:
434 *width = image->Width;
435 *height = 1;
436 *depth = image->Height;
437 break;
438 default:
439 *width = image->Width;
440 *height = image->Height;
441 *depth = image->Depth;
442 break;
443 }
444 }
445
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   /* Compare against the original (ETC) format when the miptree stores
    * a decompressed shadow of an ETC texture.
    */
   gl_format mt_format = mt->format;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   /* Cube maps occupy six slices in the tree. */
   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
         mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
            height != mt->logical_height0 ||
            depth != mt->logical_depth0) {
         return false;
      }
   }
   else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}
504
505
/**
 * Record the size and position of mipmap level \p level within the tree
 * and allocate its per-slice offset array (slice 0 is initialized to the
 * level origin; intel_miptree_set_image_offset() fills in the rest).
 *
 * NOTE(review): the calloc result is dereferenced unchecked — an OOM
 * here crashes rather than failing gracefully.
 */
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   /* Each level's slices may only be set up once. */
   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
527
528
529 void
530 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
531 GLuint level, GLuint img,
532 GLuint x, GLuint y)
533 {
534 if (img == 0 && level == 0)
535 assert(x == 0 && y == 0);
536
537 assert(img < mt->level[level].depth);
538
539 mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
540 mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
541
542 DBG("%s level %d img %d pos %d,%d\n",
543 __FUNCTION__, level, img,
544 mt->level[level].slice[img].x_offset,
545 mt->level[level].slice[img].y_offset);
546 }
547
/**
 * Return the absolute (x, y) position of image \p slice of level
 * \p level within the tree, as previously recorded by
 * intel_miptree_set_image_offset().
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
			       GLuint level, GLuint slice,
			       GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
558
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.  The residual x/y (within a tile) is returned in
 * *tile_x / *tile_y.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   /* mask_x/mask_y select the intra-tile bits of an image offset. */
   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   /* Byte offset of the containing tile's start. */
   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}
588
/**
 * CPU fallback for copying one slice between miptrees: map both slices
 * directly (BRW_MAP_DIRECT_BIT bypasses the ETC shadow path) and copy
 * with memcpy.  Used when the blitter path fails.
 */
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   /* Tightly packed on both sides: one big memcpy suffices. */
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      /* Otherwise copy row by row, advancing by each side's stride.
       * (void-pointer arithmetic here is a GNU extension.)
       */
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}
639
/**
 * Copy one image (selected by \p face for cube maps, \p depth otherwise)
 * of \p level from src_mt to dst_mt, trying the blitter first and
 * falling back to the software path on failure.
 */
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)

{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   /* Cube faces and array/3D layers are both just slices in the tree. */
   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   /* For compressed formats, work in units of blocks, not texels. */
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      /* Blitter can't do it (e.g. pitch too large): copy on the CPU. */
      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
689
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      /* Copy every layer of this level into the destination tree. */
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   /* Re-point the image at the new tree (refcounts handled inside). */
   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
720
/**
 * Map the miptree's whole BO into the CPU's address space and return the
 * base pointer.  Pair with intel_miptree_unmap_raw().
 *
 * Flushes the batchbuffer first so pending GPU writes land; a map of a
 * still-busy BO will stall (reported under DEBUG_PERF).
 */
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   /* Tiled buffers must go through the GTT so the hardware detiles them;
    * linear buffers can be mapped directly (writably).
    */
   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}
741
/**
 * Undo intel_miptree_map_raw() (drm_intel_bo_unmap handles both the GTT
 * and the direct mapping case).
 */
void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}
748
/**
 * Map a (level, slice) rectangle by pointing directly into the (GTT-
 * detiled) BO mapping.  Fills map->ptr and map->stride.
 */
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
      */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
790
/**
 * Tear down a GTT mapping; nothing to copy back since the pointer aimed
 * straight into the BO.
 */
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}
800
/**
 * Map a (level, slice) rectangle by blitting it into a temporary linear
 * miptree (map->mt) and mapping that.  Avoids slow uncached GTT reads
 * and handles BOs too large to map.  On failure map->ptr is NULL.
 */
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   /* Make sure the blit has executed before the CPU reads the copy. */
   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}
843
844 static void
845 intel_miptree_unmap_blit(struct intel_context *intel,
846 struct intel_mipmap_tree *mt,
847 struct intel_miptree_map *map,
848 unsigned int level,
849 unsigned int slice)
850 {
851 struct gl_context *ctx = &intel->ctx;
852
853 intel_miptree_unmap_raw(intel, map->mt);
854
855 if (map->mode & GL_MAP_WRITE_BIT) {
856 bool ok = intel_miptree_blit(intel,
857 map->mt, 0, 0,
858 0, 0, false,
859 mt, level, slice,
860 map->x, map->y, false,
861 map->w, map->h, GL_COPY);
862 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
863 }
864
865 intel_miptree_release(&map->mt);
866 }
867
/**
 * Map an ETC-formatted image for writing: hand the caller a malloc'd
 * staging buffer in the original ETC layout.  The data is decompressed
 * into the miptree's uncompressed shadow format at unmap time (see
 * intel_miptree_unmap_etc()).  Only whole-range invalidating writes are
 * supported.
 *
 * NOTE(review): the malloc result is not checked; an OOM here leaves
 * map->ptr NULL-ish only by accident of malloc semantics.
 */
static void
intel_miptree_map_etc(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_RGBX8888_REV);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}
888
/**
 * Finish an ETC mapping: decompress the staging buffer the application
 * wrote (map->buffer) into the miptree's uncompressed shadow storage,
 * then free the staging buffer.
 */
static void
intel_miptree_unmap_etc(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   /* Destination: the mapped rectangle's start inside the real BO. */
   uint8_t *dst = intel_miptree_map_raw(intel, mt)
                + image_y * mt->region->pitch
                + image_x * mt->region->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->region->pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->region->pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(intel, mt);
   free(map->buffer);
}
919
920 /**
921 * Create and attach a map to the miptree at (level, slice). Return the
922 * attached map.
923 */
924 static struct intel_miptree_map*
925 intel_miptree_attach_map(struct intel_mipmap_tree *mt,
926 unsigned int level,
927 unsigned int slice,
928 unsigned int x,
929 unsigned int y,
930 unsigned int w,
931 unsigned int h,
932 GLbitfield mode)
933 {
934 struct intel_miptree_map *map = calloc(1, sizeof(*map));
935
936 if (!map)
937 return NULL;
938
939 assert(mt->level[level].slice[slice].map == NULL);
940 mt->level[level].slice[slice].map = map;
941
942 map->mode = mode;
943 map->x = x;
944 map->y = y;
945 map->w = w;
946 map->h = h;
947
948 return map;
949 }
950
951 /**
952 * Release the map at (level, slice).
953 */
954 static void
955 intel_miptree_release_map(struct intel_mipmap_tree *mt,
956 unsigned int level,
957 unsigned int slice)
958 {
959 struct intel_miptree_map **map;
960
961 map = &mt->level[level].slice[slice].map;
962 free(*map);
963 *map = NULL;
964 }
965
/**
 * Map a rectangle of one (level, slice) image for CPU access, choosing
 * the cheapest safe strategy:
 *   - ETC staging buffer when the tree shadows an ETC format (unless the
 *     caller asked for direct access with BRW_MAP_DIRECT_BIT);
 *   - blit to a linear temporary for read-only maps of tiled trees on
 *     LLC hardware, or when the BO is too big to map through the GTT;
 *   - otherwise a direct GTT map.
 * On failure *out_ptr is NULL and *out_stride is 0.
 */
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map){
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   if (mt->etc_format != MESA_FORMAT_NONE &&
       !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(intel, mt, map, level, slice);
   }
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   else if (intel->has_llc &&
            !(mode & GL_MAP_WRITE_BIT) &&
            !mt->compressed &&
            (mt->region->tiling == I915_TILING_X ||
             (intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
            mt->region->pitch < 32768) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else if (mt->region->tiling != I915_TILING_NONE &&
              mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   /* Don't leave a dead map attached if the chosen path failed. */
   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
1014
/**
 * Unmap a (level, slice) previously mapped with intel_miptree_map(),
 * dispatching to the matching unmap path (ETC staging, blit temporary,
 * or direct GTT) and detaching the map record.
 */
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   /* Mapping may have failed and already been released. */
   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->etc_format != MESA_FORMAT_NONE &&
       !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(intel, mt, map, level, slice);
   } else if (map->mt) {
      /* map->mt is only set by the blit-temporary path. */
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}