i915: Use enum color_logic_ops for blits
src/mesa/drivers/dri/i915/intel_mipmap_tree.c
/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

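/**
 * Maps a cube map face target (e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X) to the
 * cube map target itself; all other targets pass through unchanged.  The
 * miptree stores the six cube faces as slices of a single target.
 */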
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

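/**
 * Allocates an intel_mipmap_tree and computes its level/slice layout for the
 * given target, format, and dimensions.  No backing storage is allocated;
 * callers attach a region afterwards.
 */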
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Chooses a hardware tiling mode for the miptree.
 *
 * Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            mesa_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
   return I915_TILING_X;
}

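/**
 * Creates a miptree and allocates a new region (BO) for it, honoring the
 * requested tiling mode within the blitter's pitch and aperture limits.
 */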
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0);

   /* pitch == 0 || height == 0 indicates the null texture */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling, mt);
   bool y_or_x = (tiling == (I915_TILING_Y | I915_TILING_X));

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle
    * Y-tiling, so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

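/**
 * Wraps an existing BO (for example, one shared from another process) in a
 * single-level 2D miptree without copying any data.
 */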
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset
    * isn't aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* Miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1);
   if (!mt) {
      free(region);
      return mt;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}

/**
 * Wraps the given DRI2 buffer's region with a miptree.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     mesa_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

/**
 * Wraps the given image-loader buffer's region with a miptree.
 */
struct intel_mipmap_tree *
intel_miptree_create_for_image_buffer(struct intel_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      mesa_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   return mt;
}

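/**
 * Allocates a single-level 2D miptree sized for use as a renderbuffer.
 */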
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

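/**
 * Points \p *dst at \p src, dropping any reference \p *dst previously held
 * and taking a new reference on \p src.
 */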
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}

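/**
 * Drops a reference to the miptree; when the refcount reaches zero, the
 * region and the per-level slice arrays are freed.  \p *mt is set to NULL.
 */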
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

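/**
 * Returns the width, height, and depth used to size the miptree for the
 * given texture image.
 */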
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* chooses the texture object based on the target passed in,
    * and objects can't change targets over their lifetimes, so this should
    * be true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   mesa_format mt_format = mt->format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth) {
      return false;
   }

   return true;
}

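/**
 * Records the size and position of a mipmap level within the miptree and
 * allocates its per-slice offset array.
 */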
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __func__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

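/**
 * Sets the 2D offset of image \p img within level \p level, relative to the
 * level's own offset in the miptree.
 */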
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

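/**
 * Returns the absolute x/y offset of the given slice within the miptree, in
 * pixels (or blocks, for compressed formats).
 */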
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y);
}

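/**
 * Software fallback for intel_miptree_copy_slice(): copies a slice between
 * two miptrees through CPU maps when the blitter path fails.
 */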
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

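/**
 * Copies one slice of a level from src_mt to dst_mt, using the blitter when
 * possible and the software path otherwise.
 */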
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, COLOR_LOGICOP_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree
 * (this is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

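/**
 * Maps the miptree's BO for CPU access (through the GTT when tiled) and
 * returns the CPU address of its first byte.
 */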
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

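/**
 * Unmaps a BO mapped with intel_miptree_map_raw().
 */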
void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

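/**
 * Maps a slice rectangle by direct CPU access to the BO, computing the
 * pointer from the slice's x/y offset within the miptree.
 */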
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt);

   if (base == NULL) {
      map->ptr = NULL;
   } else {
      /* Apply the miptree's own offset into the BO only after the NULL
       * check, so a failed map still yields a NULL pointer.
       */
      base += mt->offset;

      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}

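/**
 * Maps a slice rectangle by blitting it into a temporary linear miptree and
 * mapping that instead; used by intel_miptree_map() when the BO is too large
 * to map through the GTT.
 */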
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, COLOR_LOGICOP_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, COLOR_LOGICOP_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

/**
 * Create and attach a map to the miptree at (level, slice).  Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

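/**
 * Maps a rectangle of the given miptree slice for CPU access, choosing
 * between a direct GTT map and a blit to a linear temporary.  The pointer
 * and stride are returned through \p out_ptr and \p out_stride.
 */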
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

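/**
 * Unmaps a slice mapped with intel_miptree_map(), blitting back any writes
 * made through a linear temporary, and releases the map.
 */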
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}