i965: Push miptree tiling request into flags
[mesa.git] src/mesa/drivers/dri/i965/intel_mipmap_tree.c
1 /**************************************************************************
2 *
3 * Copyright 2006 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include <GL/gl.h>
29 #include <GL/internal/dri_interface.h>
30
31 #include "intel_batchbuffer.h"
32 #include "intel_mipmap_tree.h"
33 #include "intel_resolve_map.h"
34 #include "intel_tex.h"
35 #include "intel_blit.h"
36 #include "intel_fbo.h"
37
38 #include "brw_blorp.h"
39 #include "brw_context.h"
40
41 #include "main/enums.h"
42 #include "main/fbobject.h"
43 #include "main/formats.h"
44 #include "main/glformats.h"
45 #include "main/texcompress_etc.h"
46 #include "main/teximage.h"
47 #include "main/streaming-load-memcpy.h"
48 #include "x86/common_x86_asm.h"
49
50 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
51
52 static bool
53 intel_miptree_alloc_mcs(struct brw_context *brw,
54 struct intel_mipmap_tree *mt,
55 GLuint num_samples);
56
57 /**
58 * Determine which MSAA layout should be used by the MSAA surface being
59 * created, based on the chip generation and the surface type.
60 */
61 static enum intel_msaa_layout
62 compute_msaa_layout(struct brw_context *brw, mesa_format format, GLenum target,
63 bool disable_aux_buffers)
64 {
65 /* Prior to Gen7, all MSAA surfaces used IMS layout. */
66 if (brw->gen < 7)
67 return INTEL_MSAA_LAYOUT_IMS;
68
69 /* In Gen7, IMS layout is only used for depth and stencil buffers. */
70 switch (_mesa_get_format_base_format(format)) {
71 case GL_DEPTH_COMPONENT:
72 case GL_STENCIL_INDEX:
73 case GL_DEPTH_STENCIL:
74 return INTEL_MSAA_LAYOUT_IMS;
75 default:
76 /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
77 *
78 * This field must be set to 0 for all SINT MSRTs when all RT channels
79 * are not written
80 *
81 * In practice this means that we have to disable MCS for all signed
82 * integer MSAA buffers. The alternative, to disable MCS only when one
83 * of the render target channels is disabled, is impractical because it
84 * would require converting between CMS and UMS MSAA layouts on the fly,
85 * which is expensive.
86 */
87 if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
88 return INTEL_MSAA_LAYOUT_UMS;
89 } else if (disable_aux_buffers) {
90 /* We can't use the CMS layout because it uses an aux buffer, the MCS
91 * buffer. So fall back to UMS, which is identical to CMS without the
92 * MCS. */
93 return INTEL_MSAA_LAYOUT_UMS;
94 } else {
95 return INTEL_MSAA_LAYOUT_CMS;
96 }
97 }
98 }
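/* Illustrative examples of the rules above (our own, not from the PRM): on
 * Gen7, a 4x RGBA8 color target gets CMS, a 4x RGBA32I (signed integer)
 * target gets UMS, and a 4x depth or stencil target gets IMS; prior to
 * Gen7, everything gets IMS.
 */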
99
100
101 /**
102 * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
103 * scaled-down bitfield representation of the color buffer which is capable of
104 * recording when blocks of the color buffer are equal to the clear value.
105 * This function returns the block size that will be used by the MCS buffer
106 * corresponding to a certain color miptree.
107 *
108 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
109 * beneath the "Fast Color Clear" bullet (p327):
110 *
111 *     The following table describes the RT alignment:
112 *
113 *                       Pixels    Lines
114 *       TiledY RT CL
115 *           bpp
116 *            32            8         4
117 *            64            4         4
118 *           128            2         4
119 *       TiledX RT CL
120 *           bpp
121 *            32           16         2
122 *            64            8         2
123 *           128            4         2
124 *
125 * This alignment has the following uses:
126 *
127 * - For figuring out the size of the MCS buffer. Each 4k tile in the MCS
128 * buffer contains 128 blocks horizontally and 256 blocks vertically.
129 *
130 * - For figuring out alignment restrictions for a fast clear operation. Fast
131 * clear operations must always clear aligned multiples of 16 blocks
132 * horizontally and 32 blocks vertically.
133 *
134 * - For scaling down the coordinates sent through the render pipeline during
135 * a fast clear. X coordinates must be scaled down by 8 times the block
136 * width, and Y coordinates by 16 times the block height.
137 *
138 * - For scaling down the coordinates sent through the render pipeline during
139 * a "Render Target Resolve" operation. X coordinates must be scaled down
140 * by half the block width, and Y coordinates by half the block height.
141 */
142 void
143 intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
144 struct intel_mipmap_tree *mt,
145 unsigned *width_px, unsigned *height)
146 {
147 switch (mt->tiling) {
148 default:
149 unreachable("Non-MSRT MCS requires X or Y tiling");
150 /* In release builds, fall through */
151 case I915_TILING_Y:
152 *width_px = 32 / mt->cpp;
153 *height = 4;
154 break;
155 case I915_TILING_X:
156 *width_px = 64 / mt->cpp;
157 *height = 2;
158 }
159 }
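/* Worked example (illustrative): a 32 bpp (cpp == 4) Y-tiled RT yields
 * *width_px = 32 / 4 = 8 and *height = 4, matching the "TiledY, bpp 32" row
 * of the table above; a 128 bpp (cpp == 16) X-tiled RT yields
 * *width_px = 64 / 16 = 4 and *height = 2, matching "TiledX, bpp 128".
 */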
160
161 bool
162 intel_tiling_supports_non_msrt_mcs(struct brw_context *brw, unsigned tiling)
163 {
164 /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
165 * Target(s)", beneath the "Fast Color Clear" bullet (p326):
166 *
167 * - Support is limited to tiled render targets.
168 *
169 * Gen9 changes the restriction to Y-tile only.
170 */
171 if (brw->gen >= 9)
172 return tiling == I915_TILING_Y;
173 else if (brw->gen >= 7)
174 return tiling != I915_TILING_NONE;
175 else
176 return false;
177 }
178
179 /**
180 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
181 * can be used. This doesn't (and should not) inspect any of the properties of
182 * the miptree's BO.
183 *
184 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
185 * beneath the "Fast Color Clear" bullet (p326):
186 *
187 * - Support is for non-mip-mapped and non-array surface types only.
188 *
189 * And then later, on p327:
190 *
191 * - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
192 * 64bpp, and 128bpp.
193 */
194 bool
195 intel_miptree_is_fast_clear_capable(struct brw_context *brw,
196 struct intel_mipmap_tree *mt)
197 {
198 /* MCS support does not exist prior to Gen7 */
199 if (brw->gen < 7)
200 return false;
201
202 if (mt->disable_aux_buffers)
203 return false;
204
205 /* MCS is only supported for color buffers */
206 switch (_mesa_get_format_base_format(mt->format)) {
207 case GL_DEPTH_COMPONENT:
208 case GL_DEPTH_STENCIL:
209 case GL_STENCIL_INDEX:
210 return false;
211 }
212
213 if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
214 return false;
215 if (mt->first_level != 0 || mt->last_level != 0) {
216 if (brw->gen >= 8) {
217 perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
218 mt->logical_width0, mt->logical_height0, mt->last_level);
219 }
220
221 return false;
222 }
223 if (mt->physical_depth0 != 1) {
224 if (brw->gen >= 8) {
225 perf_debug("Layered fast clear - giving up. (%dx%dx%d)\n",
226 mt->logical_width0, mt->logical_height0,
227 mt->physical_depth0);
228 }
229
230 return false;
231 }
232
233 /* There's no point in using an MCS buffer if the surface isn't in a
234 * renderable format.
235 */
236 if (!brw->format_supported_as_render_target[mt->format])
237 return false;
238
239 return true;
240 }
241
242
243 /**
244 * Determine depth format corresponding to a depth+stencil format,
245 * for separate stencil.
246 */
247 mesa_format
248 intel_depth_format_for_depthstencil_format(mesa_format format) {
249 switch (format) {
250 case MESA_FORMAT_Z24_UNORM_S8_UINT:
251 return MESA_FORMAT_Z24_UNORM_X8_UINT;
252 case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
253 return MESA_FORMAT_Z_FLOAT32;
254 default:
255 return format;
256 }
257 }
258
259
260 /**
261 * @param for_bo Indicates that the caller is
262 * intel_miptree_create_for_bo(). If true, then do not create
263 * \c stencil_mt.
264 */
265 static struct intel_mipmap_tree *
266 intel_miptree_create_layout(struct brw_context *brw,
267 GLenum target,
268 mesa_format format,
269 GLuint first_level,
270 GLuint last_level,
271 GLuint width0,
272 GLuint height0,
273 GLuint depth0,
274 GLuint num_samples,
275 uint32_t layout_flags)
276 {
277 struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
278 if (!mt)
279 return NULL;
280
281 DBG("%s target %s format %s level %d..%d slices %d <-- %p\n", __func__,
282 _mesa_lookup_enum_by_nr(target),
283 _mesa_get_format_name(format),
284 first_level, last_level, depth0, mt);
285
286 if (target == GL_TEXTURE_1D_ARRAY) {
287 /* For a 1D Array texture the OpenGL API will treat the height0
288 * parameter as the number of array slices. For Intel hardware, we treat
289 * the 1D array as a 2D Array with a height of 1.
290 *
291 * So, when we first come through this path to create a 1D Array
292 * texture, height0 stores the number of slices, and depth0 is 1. In
293 * this case, we want to swap height0 and depth0.
294 *
295 * Since some miptrees will be created based on the base miptree, we may
296 * come through this path and see height0 as 1 and depth0 being the
297 * number of slices. In this case we don't need to do the swap.
298 */
299 assert(height0 == 1 || depth0 == 1);
300 if (height0 > 1) {
301 depth0 = height0;
302 height0 = 1;
303 }
304 }
305
306 mt->target = target;
307 mt->format = format;
308 mt->first_level = first_level;
309 mt->last_level = last_level;
310 mt->logical_width0 = width0;
311 mt->logical_height0 = height0;
312 mt->logical_depth0 = depth0;
313 mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
314 mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
315 exec_list_make_empty(&mt->hiz_map);
316
317 /* The cpp is bytes per (1, blockheight)-sized block for compressed
318 * textures. This is why you'll see divides by blockheight all over.
319 */
320 unsigned bw, bh;
321 _mesa_get_format_block_size(format, &bw, &bh);
322 assert(_mesa_get_format_bytes(mt->format) % bw == 0);
323 mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
324
325 mt->num_samples = num_samples;
326 mt->compressed = _mesa_is_format_compressed(format);
327 mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
328 mt->refcount = 1;
329
330 if (num_samples > 1) {
331 /* Adjust width/height/depth for MSAA */
332 mt->msaa_layout = compute_msaa_layout(brw, format,
333 mt->target, mt->disable_aux_buffers);
334 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
335 /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
336 * "If the surface is multisampled and it is a depth or stencil
337 * surface or Multisampled Surface StorageFormat in SURFACE_STATE is
338 * MSFMT_DEPTH_STENCIL, WL and HL must be adjusted as follows before
339 * proceeding:
340 *
341 * +------------------+----------------------+----------------------+
342 * | Num Multisamples |        W_l =         |        H_l =         |
343 * +------------------+----------------------+----------------------+
344 * |        2         | ceiling(W_l / 2) * 4 | H_l (no adjustment)  |
345 * |        4         | ceiling(W_l / 2) * 4 | ceiling(H_l / 2) * 4 |
346 * |        8         | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 4 |
347 * |       16         | ceiling(W_l / 2) * 8 | ceiling(H_l / 2) * 8 |
348 * +------------------+----------------------+----------------------+
349 * "
350 *
351 * Note that MSFMT_DEPTH_STENCIL just means the IMS (interleaved)
352 * format rather than UMS/CMS (array slices). The Sandybridge PRM,
353 * Volume 1, Part 1, Page 111 has the same formula for 4x MSAA.
354 *
355 * Another more complicated explanation for these adjustments comes
356 * from the Sandybridge PRM, volume 4, part 1, page 31:
357 *
358 * "Any of the other messages (sample*, LOD, load4) used with a
359 * (4x) multisampled surface will in-effect sample a surface with
360 * double the height and width as that indicated in the surface
361 * state. Each pixel position on the original-sized surface is
362 * replaced with a 2x2 of samples with the following arrangement:
363 *
364 * sample 0 sample 2
365 * sample 1 sample 3"
366 *
367 * Thus, when sampling from a multisampled texture, it behaves as
368 * though the layout in memory for (x,y,sample) is:
369 *
370 * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
371 * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
372 *
373 * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
374 * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
375 *
376 * However, the actual layout of multisampled data in memory is:
377 *
378 * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
379 * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
380 *
381 * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
382 * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
383 *
384 * This pattern repeats for each 2x2 pixel block.
385 *
386 * As a result, when calculating the size of our 4-sample buffer for
387 * an odd width or height, we have to align before scaling up because
388 * sample 3 is in that bottom right 2x2 block.
389 */
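/* Worked example (illustrative, hypothetical dimensions): a 5x3 4x IMS
 * surface becomes ALIGN(5, 2) * 2 x ALIGN(3, 2) * 2 = 12x8 physical; at
 * 8x it becomes ALIGN(5, 2) * 4 x ALIGN(3, 2) * 2 = 24x8.
 */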
390 switch (num_samples) {
391 case 2:
392 assert(brw->gen >= 8);
393 width0 = ALIGN(width0, 2) * 2;
394 height0 = ALIGN(height0, 2);
395 break;
396 case 4:
397 width0 = ALIGN(width0, 2) * 2;
398 height0 = ALIGN(height0, 2) * 2;
399 break;
400 case 8:
401 width0 = ALIGN(width0, 2) * 4;
402 height0 = ALIGN(height0, 2) * 2;
403 break;
404 default:
405 /* num_samples should already have been quantized to 0, 1, 2, 4, or
406 * 8.
407 */
408 unreachable("not reached");
409 }
410 } else {
411 /* Non-interleaved */
412 depth0 *= num_samples;
413 }
414 }
415
416 /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
417 * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on
418 * Gen 7 and 8. Gen8 and 9 hardware no longer supports this layout, but on
419 * Gen8 we still set it so that the layout code picks a qpitch value which
420 * doesn't include space for the mipmaps. On Gen9 this is unnecessary
421 * because the layout code automatically picks a packed qpitch value
422 * whenever mt->first_level == mt->last_level.
423 * TODO: can we use it elsewhere?
424 * TODO: also disable this on Gen8 and pick the qpitch value like Gen9
425 */
426 if (brw->gen >= 9) {
427 mt->array_layout = ALL_LOD_IN_EACH_SLICE;
428 } else {
429 switch (mt->msaa_layout) {
430 case INTEL_MSAA_LAYOUT_NONE:
431 case INTEL_MSAA_LAYOUT_IMS:
432 mt->array_layout = ALL_LOD_IN_EACH_SLICE;
433 break;
434 case INTEL_MSAA_LAYOUT_UMS:
435 case INTEL_MSAA_LAYOUT_CMS:
436 mt->array_layout = ALL_SLICES_AT_EACH_LOD;
437 break;
438 }
439 }
440
441 if (target == GL_TEXTURE_CUBE_MAP) {
442 assert(depth0 == 1);
443 depth0 = 6;
444 }
445
446 mt->physical_width0 = width0;
447 mt->physical_height0 = height0;
448 mt->physical_depth0 = depth0;
449
450 if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
451 _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
452 (brw->must_use_separate_stencil ||
453 (brw->has_separate_stencil &&
454 intel_miptree_wants_hiz_buffer(brw, mt)))) {
455 uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
456 if (brw->gen == 6) {
457 stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
458 MIPTREE_LAYOUT_ALLOC_ANY_TILED;
459 }
460
461 mt->stencil_mt = intel_miptree_create(brw,
462 mt->target,
463 MESA_FORMAT_S_UINT8,
464 mt->first_level,
465 mt->last_level,
466 mt->logical_width0,
467 mt->logical_height0,
468 mt->logical_depth0,
469 num_samples,
470 stencil_flags);
471
472 if (!mt->stencil_mt) {
473 intel_miptree_release(&mt);
474 return NULL;
475 }
476
477 /* Fix up the Z miptree format for how we're splitting out separate
478 * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
479 */
480 mt->format = intel_depth_format_for_depthstencil_format(mt->format);
481 mt->cpp = 4;
482
483 if (format == mt->format) {
484 _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
485 _mesa_get_format_name(mt->format));
486 }
487 }
488
489 if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
490 mt->array_layout = ALL_SLICES_AT_EACH_LOD;
491
492 /*
493 * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
494 * multisampled or have an AUX buffer attached to it.
495 *
496 *  GEN  |    MSRT    | AUX_CCS_* or AUX_MCS
497 * -------------------------------------------
498 *   9   | HALIGN_16  |      HALIGN_16
499 *   8   | HALIGN_ANY |      HALIGN_16
500 *   7   |     ?      |          ?
501 *   6   |     ?      |          ?
502 */
503 if (intel_miptree_is_fast_clear_capable(brw, mt)) {
504 if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
505 layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
506 } else if (brw->gen >= 9 && num_samples > 1) {
507 layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
508 } else {
509 /* For now, nothing else has this requirement */
510 assert((layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
511 }
512
513 brw_miptree_layout(brw, mt, layout_flags);
514
515 if (mt->disable_aux_buffers)
516 assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS);
517
518 return mt;
519 }
520
521
522 /**
523 * Choose an appropriate uncompressed format for a requested
524 * compressed format, if unsupported.
525 */
526 mesa_format
527 intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
528 {
529 /* No need to lower ETC formats on these platforms,
530 * they are supported natively.
531 */
532 if (brw->gen >= 8 || brw->is_baytrail)
533 return format;
534
535 switch (format) {
536 case MESA_FORMAT_ETC1_RGB8:
537 return MESA_FORMAT_R8G8B8X8_UNORM;
538 case MESA_FORMAT_ETC2_RGB8:
539 return MESA_FORMAT_R8G8B8X8_UNORM;
540 case MESA_FORMAT_ETC2_SRGB8:
541 case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
542 case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
543 return MESA_FORMAT_B8G8R8A8_SRGB;
544 case MESA_FORMAT_ETC2_RGBA8_EAC:
545 case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
546 return MESA_FORMAT_R8G8B8A8_UNORM;
547 case MESA_FORMAT_ETC2_R11_EAC:
548 return MESA_FORMAT_R_UNORM16;
549 case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
550 return MESA_FORMAT_R_SNORM16;
551 case MESA_FORMAT_ETC2_RG11_EAC:
552 return MESA_FORMAT_R16G16_UNORM;
553 case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
554 return MESA_FORMAT_R16G16_SNORM;
555 default:
556 /* Non ETC1 / ETC2 format */
557 return format;
558 }
559 }
560
561 /* This function computes Yf/Ys tiled bo size, alignment and pitch. */
562 static unsigned long
563 intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment,
564 unsigned long *pitch)
565 {
566 const uint32_t bpp = mt->cpp * 8;
567 const uint32_t aspect_ratio = (bpp == 16 || bpp == 64) ? 2 : 1;
568 uint32_t tile_width, tile_height;
569 unsigned long stride, size, aligned_y;
570
571 assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE);
572
573 switch (bpp) {
574 case 8:
575 tile_height = 64;
576 break;
577 case 16:
578 case 32:
579 tile_height = 32;
580 break;
581 case 64:
582 case 128:
583 tile_height = 16;
584 break;
585 default:
586 unreachable("not reached");
587 }
588
589 if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YS)
590 tile_height *= 4;
591
592 aligned_y = ALIGN(mt->total_height, tile_height);
593 stride = mt->total_width * mt->cpp;
594 tile_width = tile_height * mt->cpp * aspect_ratio;
595 stride = ALIGN(stride, tile_width);
596 size = stride * aligned_y;
597
598 if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) {
599 assert(size % 4096 == 0);
600 *alignment = 4096;
601 } else {
602 assert(size % (64 * 1024) == 0);
603 *alignment = 64 * 1024;
604 }
605 *pitch = stride;
606 return size;
607 }
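/* Worked example (illustrative): for a 32 bpp (cpp == 4) surface,
 * tile_height = 32 and tile_width = 32 * 4 = 128 bytes, i.e. a 4KB Yf
 * tile; for Ys, tile_height scales to 128 rows and tile_width to
 * 128 * 4 = 512 bytes, i.e. a 64KB tile. The 4096 and 64KB alignments
 * asserted above follow directly from those tile sizes.
 */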
608
609 struct intel_mipmap_tree *
610 intel_miptree_create(struct brw_context *brw,
611 GLenum target,
612 mesa_format format,
613 GLuint first_level,
614 GLuint last_level,
615 GLuint width0,
616 GLuint height0,
617 GLuint depth0,
618 GLuint num_samples,
619 uint32_t layout_flags)
620 {
621 struct intel_mipmap_tree *mt;
622 mesa_format tex_format = format;
623 mesa_format etc_format = MESA_FORMAT_NONE;
624 GLuint total_width, total_height;
625 uint32_t alloc_flags = 0;
626
627 format = intel_lower_compressed_format(brw, format);
628
629 etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
630
631 assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0);
632 assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
633 mt = intel_miptree_create_layout(brw, target, format,
634 first_level, last_level, width0,
635 height0, depth0, num_samples,
636 layout_flags);
637 /*
638 * total_width == 0 || total_height == 0 indicates the null texture
639 */
640 if (!mt || !mt->total_width || !mt->total_height) {
641 intel_miptree_release(&mt);
642 return NULL;
643 }
644
645 total_width = mt->total_width;
646 total_height = mt->total_height;
647
648 if (format == MESA_FORMAT_S_UINT8) {
649 /* Align to size of W tile, 64x64. */
650 total_width = ALIGN(total_width, 64);
651 total_height = ALIGN(total_height, 64);
652 }
653
654 bool y_or_x = false;
655
656 if (mt->tiling == (I915_TILING_Y | I915_TILING_X)) {
657 y_or_x = true;
658 mt->tiling = I915_TILING_Y;
659 }
660
661 if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
662 alloc_flags |= BO_ALLOC_FOR_RENDER;
663
664 unsigned long pitch;
665 mt->etc_format = etc_format;
666
667 if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
668 unsigned alignment = 0;
669 unsigned long size;
670 size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch);
671 assert(size);
672 mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree",
673 size, alignment);
674 } else {
675 mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
676 total_width, total_height, mt->cpp,
677 &mt->tiling, &pitch,
678 alloc_flags);
679 }
680
681 mt->pitch = pitch;
682
683 /* If the BO is too large to fit in the aperture, we need to use the
684 * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
685 * handle Y-tiling, so we need to fall back to X.
686 */
687 if (brw->gen < 6 && y_or_x && mt->bo->size >= brw->max_gtt_map_object_size) {
688 perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
689 mt->total_width, mt->total_height);
690
691 mt->tiling = I915_TILING_X;
692 drm_intel_bo_unreference(mt->bo);
693 mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
694 total_width, total_height, mt->cpp,
695 &mt->tiling, &pitch, alloc_flags);
696 mt->pitch = pitch;
697 }
698
699 mt->offset = 0;
700
701 if (!mt->bo) {
702 intel_miptree_release(&mt);
703 return NULL;
704 }
705
706
707 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
708 assert(mt->num_samples > 1);
709 if (!intel_miptree_alloc_mcs(brw, mt, num_samples)) {
710 intel_miptree_release(&mt);
711 return NULL;
712 }
713 }
714
715 /* If this miptree is capable of supporting fast color clears, set
716 * fast_clear_state appropriately to ensure that fast clears will occur.
717 * Allocation of the MCS miptree will be deferred until the first fast
718 * clear actually occurs.
719 */
720 if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
721 intel_miptree_is_fast_clear_capable(brw, mt)) {
722 mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
723 assert(brw->gen < 8 || mt->align_w == 16 || num_samples <= 1);
724 }
725
726 return mt;
727 }
728
729 struct intel_mipmap_tree *
730 intel_miptree_create_for_bo(struct brw_context *brw,
731 drm_intel_bo *bo,
732 mesa_format format,
733 uint32_t offset,
734 uint32_t width,
735 uint32_t height,
736 uint32_t depth,
737 int pitch,
738 uint32_t layout_flags)
739 {
740 struct intel_mipmap_tree *mt;
741 uint32_t tiling, swizzle;
742 GLenum target;
743
744 drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
745
746 /* Nothing will be able to use this miptree with the BO if the offset isn't
747 * aligned.
748 */
749 if (tiling != I915_TILING_NONE)
750 assert(offset % 4096 == 0);
751
752 /* miptrees can't handle negative pitch. If you need flipping of images,
753 * that's outside of the scope of the mt.
754 */
755 assert(pitch >= 0);
756
757 target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
758
759 /* The BO already has a tiling format and we shouldn't confuse the lower
760 * layers by making it try to find a tiling format again.
761 */
762 assert((layout_flags & MIPTREE_LAYOUT_ALLOC_ANY_TILED) == 0);
763 assert((layout_flags & MIPTREE_LAYOUT_ALLOC_LINEAR) == 0);
764
765 layout_flags |= MIPTREE_LAYOUT_FOR_BO;
766 mt = intel_miptree_create_layout(brw, target, format,
767 0, 0,
768 width, height, depth, 0,
769 layout_flags);
770 if (!mt)
771 return NULL;
772
773 drm_intel_bo_reference(bo);
774 mt->bo = bo;
775 mt->pitch = pitch;
776 mt->offset = offset;
777 mt->tiling = tiling;
778
779 return mt;
780 }
781
782 /**
783 * For a singlesample renderbuffer, this simply wraps the given BO with a
784 * miptree.
785 *
786 * For a multisample renderbuffer, this wraps the window system's
787 * (singlesample) BO with a singlesample miptree attached to the
788 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
789 * that will contain the actual rendering (which is lazily resolved to
790 * irb->singlesample_mt).
791 */
792 void
793 intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
794 struct intel_renderbuffer *irb,
795 drm_intel_bo *bo,
796 uint32_t width, uint32_t height,
797 uint32_t pitch)
798 {
799 struct intel_mipmap_tree *singlesample_mt = NULL;
800 struct intel_mipmap_tree *multisample_mt = NULL;
801 struct gl_renderbuffer *rb = &irb->Base.Base;
802 mesa_format format = rb->Format;
803 int num_samples = rb->NumSamples;
804
805 /* Only the front and back buffers, which are color buffers, are allocated
806 * through the image loader.
807 */
808 assert(_mesa_get_format_base_format(format) == GL_RGB ||
809 _mesa_get_format_base_format(format) == GL_RGBA);
810
811 singlesample_mt = intel_miptree_create_for_bo(intel,
812 bo,
813 format,
814 0,
815 width,
816 height,
817 1,
818 pitch,
819 0);
820 if (!singlesample_mt)
821 goto fail;
822
823 /* If this miptree is capable of supporting fast color clears, set
824 * fast_clear_state appropriately to ensure that fast clears will occur.
825 * Allocation of the MCS miptree will be deferred until the first fast
826 * clear actually occurs.
827 */
828 if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
829 intel_miptree_is_fast_clear_capable(intel, singlesample_mt))
830 singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
831
832 if (num_samples == 0) {
833 intel_miptree_release(&irb->mt);
834 irb->mt = singlesample_mt;
835
836 assert(!irb->singlesample_mt);
837 } else {
838 intel_miptree_release(&irb->singlesample_mt);
839 irb->singlesample_mt = singlesample_mt;
840
841 if (!irb->mt ||
842 irb->mt->logical_width0 != width ||
843 irb->mt->logical_height0 != height) {
844 multisample_mt = intel_miptree_create_for_renderbuffer(intel,
845 format,
846 width,
847 height,
848 num_samples);
849 if (!multisample_mt)
850 goto fail;
851
852 irb->need_downsample = false;
853 intel_miptree_release(&irb->mt);
854 irb->mt = multisample_mt;
855 }
856 }
857 return;
858
859 fail:
860 intel_miptree_release(&irb->singlesample_mt);
861 intel_miptree_release(&irb->mt);
862 return;
863 }
864
865 struct intel_mipmap_tree*
866 intel_miptree_create_for_renderbuffer(struct brw_context *brw,
867 mesa_format format,
868 uint32_t width,
869 uint32_t height,
870 uint32_t num_samples)
871 {
872 struct intel_mipmap_tree *mt;
873 uint32_t depth = 1;
874 bool ok;
875 GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
876 const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
877 MIPTREE_LAYOUT_ALLOC_ANY_TILED;
878
879
880 mt = intel_miptree_create(brw, target, format, 0, 0,
881 width, height, depth, num_samples,
882 layout_flags);
883 if (!mt)
884 goto fail;
885
886 if (intel_miptree_wants_hiz_buffer(brw, mt)) {
887 ok = intel_miptree_alloc_hiz(brw, mt);
888 if (!ok)
889 goto fail;
890 }
891
892 return mt;
893
894 fail:
895 intel_miptree_release(&mt);
896 return NULL;
897 }
898
899 void
900 intel_miptree_reference(struct intel_mipmap_tree **dst,
901 struct intel_mipmap_tree *src)
902 {
903 if (*dst == src)
904 return;
905
906 intel_miptree_release(dst);
907
908 if (src) {
909 src->refcount++;
910 DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
911 }
912
913 *dst = src;
914 }
915
916
917 void
918 intel_miptree_release(struct intel_mipmap_tree **mt)
919 {
920 if (!*mt)
921 return;
922
923 DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
924 if (--(*mt)->refcount <= 0) {
925 GLuint i;
926
927 DBG("%s deleting %p\n", __func__, *mt);
928
929 drm_intel_bo_unreference((*mt)->bo);
930 intel_miptree_release(&(*mt)->stencil_mt);
931 if ((*mt)->hiz_buf) {
932 if ((*mt)->hiz_buf->mt)
933 intel_miptree_release(&(*mt)->hiz_buf->mt);
934 else
935 drm_intel_bo_unreference((*mt)->hiz_buf->bo);
936 free((*mt)->hiz_buf);
937 }
938 intel_miptree_release(&(*mt)->mcs_mt);
939 intel_resolve_map_clear(&(*mt)->hiz_map);
940
941 for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
942 free((*mt)->level[i].slice);
943 }
944
945 free(*mt);
946 }
947 *mt = NULL;
948 }
949
950 void
951 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
952 int *width, int *height, int *depth)
953 {
954 switch (image->TexObject->Target) {
955 case GL_TEXTURE_1D_ARRAY:
956 *width = image->Width;
957 *height = 1;
958 *depth = image->Height;
959 break;
960 default:
961 *width = image->Width;
962 *height = image->Height;
963 *depth = image->Depth;
964 break;
965 }
966 }
967
968 /**
969 * Can the image be pulled into a unified mipmap tree? This mirrors
970 * the completeness test in a lot of ways.
971 *
972 * Not sure whether I want to pass gl_texture_image here.
973 */
974 bool
975 intel_miptree_match_image(struct intel_mipmap_tree *mt,
976 struct gl_texture_image *image)
977 {
978 struct intel_texture_image *intelImage = intel_texture_image(image);
979 GLuint level = intelImage->base.Base.Level;
980 int width, height, depth;
981
982 /* glTexImage* choose the texture object based on the target passed in, and
983 * objects can't change targets over their lifetimes, so this should be
984 * true.
985 */
986 assert(image->TexObject->Target == mt->target);
987
988 mesa_format mt_format = mt->format;
989 if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
990 mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
991 if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
992 mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
993 if (mt->etc_format != MESA_FORMAT_NONE)
994 mt_format = mt->etc_format;
995
996 if (image->TexFormat != mt_format)
997 return false;
998
999 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
1000
1001 if (mt->target == GL_TEXTURE_CUBE_MAP)
1002 depth = 6;
1003
1004 int level_depth = mt->level[level].depth;
1005 if (mt->num_samples > 1) {
1006 switch (mt->msaa_layout) {
1007 case INTEL_MSAA_LAYOUT_NONE:
1008 case INTEL_MSAA_LAYOUT_IMS:
1009 break;
1010 case INTEL_MSAA_LAYOUT_UMS:
1011 case INTEL_MSAA_LAYOUT_CMS:
1012 level_depth /= mt->num_samples;
1013 break;
1014 }
1015 }
1016
1017 /* Test image dimensions against the base level image adjusted for
1018 * minification. This will also catch images not present in the
1019 * tree, changed targets, etc.
1020 */
1021 if (width != minify(mt->logical_width0, level - mt->first_level) ||
1022 height != minify(mt->logical_height0, level - mt->first_level) ||
1023 depth != level_depth) {
1024 return false;
1025 }
1026
1027 if (image->NumSamples != mt->num_samples)
1028 return false;
1029
1030 return true;
1031 }
1032
1033
1034 void
1035 intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
1036 GLuint level,
1037 GLuint x, GLuint y, GLuint d)
1038 {
1039 mt->level[level].depth = d;
1040 mt->level[level].level_x = x;
1041 mt->level[level].level_y = y;
1042
1043 DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
1044 level, d, x, y);
1045
1046 assert(mt->level[level].slice == NULL);
1047
1048 mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
1049 mt->level[level].slice[0].x_offset = mt->level[level].level_x;
1050 mt->level[level].slice[0].y_offset = mt->level[level].level_y;
1051 }
1052
1053
1054 void
1055 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
1056 GLuint level, GLuint img,
1057 GLuint x, GLuint y)
1058 {
1059 if (img == 0 && level == 0)
1060 assert(x == 0 && y == 0);
1061
1062 assert(img < mt->level[level].depth);
1063
1064 mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
1065 mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
1066
1067 DBG("%s level %d img %d pos %d,%d\n",
1068 __func__, level, img,
1069 mt->level[level].slice[img].x_offset,
1070 mt->level[level].slice[img].y_offset);
1071 }
1072
1073 void
1074 intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
1075 GLuint level, GLuint slice,
1076 GLuint *x, GLuint *y)
1077 {
1078 assert(slice < mt->level[level].depth);
1079
1080 *x = mt->level[level].slice[slice].x_offset;
1081 *y = mt->level[level].slice[slice].y_offset;
1082 }
1083
1084 /**
1085 * This function computes masks that may be used to select the bits of the X
1086 * and Y coordinates that indicate the offset within a tile. If the BO is
1087 * untiled, the masks are set to 0.
1088 */
1089 void
1090 intel_miptree_get_tile_masks(const struct intel_mipmap_tree *mt,
1091 uint32_t *mask_x, uint32_t *mask_y,
1092 bool map_stencil_as_y_tiled)
1093 {
1094 int cpp = mt->cpp;
1095 uint32_t tiling = mt->tiling;
1096
1097 if (map_stencil_as_y_tiled)
1098 tiling = I915_TILING_Y;
1099
1100 switch (tiling) {
1101 default:
1102 unreachable("not reached");
1103 case I915_TILING_NONE:
1104 *mask_x = *mask_y = 0;
1105 break;
1106 case I915_TILING_X:
1107 *mask_x = 512 / cpp - 1;
1108 *mask_y = 7;
1109 break;
1110 case I915_TILING_Y:
1111 *mask_x = 128 / cpp - 1;
1112 *mask_y = 31;
1113 break;
1114 }
1115 }
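/* Worked example (illustrative): with cpp == 4, an X tile is 512 bytes x
 * 8 rows = 128x8 pixels, giving mask_x = 127 and mask_y = 7; a Y tile is
 * 128 bytes x 32 rows = 32x32 pixels, giving mask_x = 31 and mask_y = 31.
 */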
1116
1117 /**
1118 * Compute the offset (in bytes) from the start of the BO to the given x
1119 * and y coordinate. For tiled BOs, caller must ensure that x and y are
1120 * multiples of the tile size.
1121 */
1122 uint32_t
1123 intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
1124 uint32_t x, uint32_t y,
1125 bool map_stencil_as_y_tiled)
1126 {
1127 int cpp = mt->cpp;
1128 uint32_t pitch = mt->pitch;
1129 uint32_t tiling = mt->tiling;
1130
1131 if (map_stencil_as_y_tiled) {
1132 tiling = I915_TILING_Y;
1133
1134 /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
1135 * gets transformed into a 32-high Y-tile. Accordingly, the pitch of
1136 * the resulting surface is twice the pitch of the original miptree,
1137 * since each row in the Y-tiled view corresponds to two rows in the
1138 * actual W-tiled surface. So we need to correct the pitch before
1139 * computing the offsets.
1140 */
1141 pitch *= 2;
1142 }
1143
1144 switch (tiling) {
1145 default:
1146 unreachable("not reached");
1147 case I915_TILING_NONE:
1148 return y * pitch + x * cpp;
1149 case I915_TILING_X:
1150 assert((x % (512 / cpp)) == 0);
1151 assert((y % 8) == 0);
1152 return y * pitch + x / (512 / cpp) * 4096;
1153 case I915_TILING_Y:
1154 assert((x % (128 / cpp)) == 0);
1155 assert((y % 32) == 0);
1156 return y * pitch + x / (128 / cpp) * 4096;
1157 }
1158 }
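/* Worked example (illustrative, hypothetical numbers): for an X-tiled
 * surface with cpp == 4 and pitch == 8192, the tile-aligned coordinate
 * (x, y) = (256, 16) maps to 16 * 8192 + 256 / 128 * 4096 = 139264 bytes:
 * sixteen pitch-rows down plus two 4KB tiles across.
 */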
1159
1160 /**
1161 * Rendering with tiled buffers requires that the base address of the buffer
1162 * be aligned to a page boundary. For renderbuffers, and sometimes with
1163 * textures, we may want the surface to point at a texture image level that
1164 * isn't at a page boundary.
1165 *
1166 * This function returns an appropriately-aligned base offset
1167 * according to the tiling restrictions, plus any required x/y offset
1168 * from there.
1169 */
1170 uint32_t
1171 intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
1172 GLuint level, GLuint slice,
1173 uint32_t *tile_x,
1174 uint32_t *tile_y)
1175 {
1176 uint32_t x, y;
1177 uint32_t mask_x, mask_y;
1178
1179 intel_miptree_get_tile_masks(mt, &mask_x, &mask_y, false);
1180 intel_miptree_get_image_offset(mt, level, slice, &x, &y);
1181
1182 *tile_x = x & mask_x;
1183 *tile_y = y & mask_y;
1184
1185 return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
1186 }
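/* Worked example (illustrative, hypothetical numbers): for a Y-tiled
 * surface with cpp == 4 (mask_x == mask_y == 31), a slice at image offset
 * (100, 70) yields *tile_x = 4, *tile_y = 6, and a base offset computed
 * at the tile-aligned coordinate (96, 64).
 */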
1187
1188 static void
1189 intel_miptree_copy_slice_sw(struct brw_context *brw,
1190 struct intel_mipmap_tree *dst_mt,
1191 struct intel_mipmap_tree *src_mt,
1192 int level,
1193 int slice,
1194 int width,
1195 int height)
1196 {
1197 void *src, *dst;
1198 ptrdiff_t src_stride, dst_stride;
1199 int cpp = dst_mt->cpp;
1200
1201 intel_miptree_map(brw, src_mt,
1202 level, slice,
1203 0, 0,
1204 width, height,
1205 GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
1206 &src, &src_stride);
1207
1208 intel_miptree_map(brw, dst_mt,
1209 level, slice,
1210 0, 0,
1211 width, height,
1212 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
1213 BRW_MAP_DIRECT_BIT,
1214 &dst, &dst_stride);
1215
1216 DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
1217 _mesa_get_format_name(src_mt->format),
1218 src_mt, src, src_stride,
1219 _mesa_get_format_name(dst_mt->format),
1220 dst_mt, dst, dst_stride,
1221 width, height);
1222
1223 int row_size = cpp * width;
1224 if (src_stride == row_size &&
1225 dst_stride == row_size) {
1226 memcpy(dst, src, row_size * height);
1227 } else {
1228 for (int i = 0; i < height; i++) {
1229 memcpy(dst, src, row_size);
1230 dst += dst_stride;
1231 src += src_stride;
1232 }
1233 }
1234
1235 intel_miptree_unmap(brw, dst_mt, level, slice);
1236 intel_miptree_unmap(brw, src_mt, level, slice);
1237
1238 /* Don't forget to copy the stencil data over, too. We could have skipped
1239 * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
1240 * shuffling the two data sources in/out of temporary storage instead of
1241 * the direct mapping we get this way.
1242 */
1243 if (dst_mt->stencil_mt) {
1244 assert(src_mt->stencil_mt);
1245 intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
1246 level, slice, width, height);
1247 }
1248 }
1249
1250 static void
1251 intel_miptree_copy_slice(struct brw_context *brw,
1252 struct intel_mipmap_tree *dst_mt,
1253 struct intel_mipmap_tree *src_mt,
1254 int level,
1255 int face,
1256 int depth)
1258 {
1259 mesa_format format = src_mt->format;
1260 uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
1261 uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
1262 int slice;
1263
1264 if (face > 0)
1265 slice = face;
1266 else
1267 slice = depth;
1268
1269 assert(depth < src_mt->level[level].depth);
1270 assert(src_mt->format == dst_mt->format);
1271
1272 if (dst_mt->compressed) {
1273 unsigned int i, j;
1274 _mesa_get_format_block_size(dst_mt->format, &i, &j);
1275 height = ALIGN(height, j) / j;
1276 width = ALIGN(width, i);
1277 }
1278
1279 /* If it's a packed depth/stencil buffer with separate stencil, the blit
1280 * below won't apply since we can't do the depth's Y tiling or the
1281 * stencil's W tiling in the blitter.
1282 */
1283 if (src_mt->stencil_mt) {
1284 intel_miptree_copy_slice_sw(brw,
1285 dst_mt, src_mt,
1286 level, slice,
1287 width, height);
1288 return;
1289 }
1290
1291 uint32_t dst_x, dst_y, src_x, src_y;
1292 intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
1293 intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
1294
1295 DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
1296 _mesa_get_format_name(src_mt->format),
1297 src_mt, src_x, src_y, src_mt->pitch,
1298 _mesa_get_format_name(dst_mt->format),
1299 dst_mt, dst_x, dst_y, dst_mt->pitch,
1300 width, height);
1301
1302 if (!intel_miptree_blit(brw,
1303 src_mt, level, slice, 0, 0, false,
1304 dst_mt, level, slice, 0, 0, false,
1305 width, height, GL_COPY)) {
1306 perf_debug("miptree validate blit for %s failed\n",
1307 _mesa_get_format_name(format));
1308
1309 intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
1310 width, height);
1311 }
1312 }
1313
1314 /**
1315 * Copies the image's current data to the given miptree, and associates that
1316 * miptree with the image.
1317 *
1318 * If \c invalidate is true, then the actual image data does not need to be
1319 * copied, but the image still needs to be associated to the new miptree (this
1320 * is set to true if we're about to clear the image).
1321 */
1322 void
1323 intel_miptree_copy_teximage(struct brw_context *brw,
1324 struct intel_texture_image *intelImage,
1325 struct intel_mipmap_tree *dst_mt,
1326 bool invalidate)
1327 {
1328 struct intel_mipmap_tree *src_mt = intelImage->mt;
1329 struct intel_texture_object *intel_obj =
1330 intel_texture_object(intelImage->base.Base.TexObject);
1331 int level = intelImage->base.Base.Level;
1332 int face = intelImage->base.Base.Face;
1333
1334 GLuint depth;
1335 if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
1336 depth = intelImage->base.Base.Height;
1337 else
1338 depth = intelImage->base.Base.Depth;
1339
1340 if (!invalidate) {
1341 for (int slice = 0; slice < depth; slice++) {
1342 intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
1343 }
1344 }
1345
1346 intel_miptree_reference(&intelImage->mt, dst_mt);
1347 intel_obj->needs_validate = true;
1348 }
1349
1350 static bool
1351 intel_miptree_alloc_mcs(struct brw_context *brw,
1352 struct intel_mipmap_tree *mt,
1353 GLuint num_samples)
1354 {
1355 assert(brw->gen >= 7); /* MCS only used on Gen7+ */
1356 assert(mt->mcs_mt == NULL);
1357 assert(!mt->disable_aux_buffers);
1358
1359 /* Choose the correct format for the MCS buffer. All that really matters
1360 * is that we allocate the right buffer size, since we'll always be
1361 * accessing this miptree using MCS-specific hardware mechanisms, which
1362 * infer the correct format based on num_samples.
1363 */
1364 mesa_format format;
1365 switch (num_samples) {
1366 case 2:
1367 case 4:
1368 /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
1369 * each sample).
1370 */
1371 format = MESA_FORMAT_R_UNORM8;
1372 break;
1373 case 8:
1374 /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
1375 * for each sample, plus 8 padding bits).
1376 */
1377 format = MESA_FORMAT_R_UINT32;
1378 break;
1379 default:
1380 unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
1381 }
1382
1383 /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
1384 *
1385 * "The MCS surface must be stored as Tile Y."
1386 */
1387 const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
1388 MIPTREE_LAYOUT_ALLOC_YTILED;
1389 mt->mcs_mt = intel_miptree_create(brw,
1390 mt->target,
1391 format,
1392 mt->first_level,
1393 mt->last_level,
1394 mt->logical_width0,
1395 mt->logical_height0,
1396 mt->logical_depth0,
1397 0 /* num_samples */,
1398 mcs_flags);
1399
1400 /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
1401 *
1402 * When MCS buffer is enabled and bound to MSRT, it is required that it
1403 * is cleared prior to any rendering.
1404 *
1405 * Since we don't use the MCS buffer for any purpose other than rendering,
1406 * it makes sense to just clear it immediately upon allocation.
1407 *
1408 * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
1409 */
1410 void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
1411 memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
1412 intel_miptree_unmap_raw(brw, mt->mcs_mt);
1413 mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
1414
1415 return mt->mcs_mt;
1416 }
1417
1418
1419 bool
1420 intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
1421 struct intel_mipmap_tree *mt)
1422 {
1423 assert(mt->mcs_mt == NULL);
1424 assert(!mt->disable_aux_buffers);
1425
1426 /* The format of the MCS buffer is opaque to the driver; all that matters
1427 * is that we get its size and pitch right. We'll pretend that the format
1428 * is R32. Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
1429 * R32 buffer is 32 pixels across, we'll need to scale the width down by
1430 * the block width and then a further factor of 4. Since an MCS tile
1431 * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
1432 * we'll need to scale the height down by the block height and then a
1433 * further factor of 8.
1434 */
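/* Worked example (illustrative, hypothetical dimensions): for a 32 bpp
 * (cpp == 4) Y-tiled RT the block size is 8x4, so width_divisor == 32 and
 * height_divisor == 32; a 1920x1080 surface then gets a 60x34 MCS miptree
 * (ALIGN(1920, 32) / 32 by ALIGN(1080, 32) / 32).
 */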
1435 const mesa_format format = MESA_FORMAT_R_UINT32;
1436 unsigned block_width_px;
1437 unsigned block_height;
1438 intel_get_non_msrt_mcs_alignment(brw, mt, &block_width_px, &block_height);
1439 unsigned width_divisor = block_width_px * 4;
1440 unsigned height_divisor = block_height * 8;
1441 unsigned mcs_width =
1442 ALIGN(mt->logical_width0, width_divisor) / width_divisor;
1443 unsigned mcs_height =
1444 ALIGN(mt->logical_height0, height_divisor) / height_divisor;
1445 assert(mt->logical_depth0 == 1);
1446 uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
1447 MIPTREE_LAYOUT_ALLOC_YTILED;
1448 if (brw->gen >= 8) {
1449 layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
1450 }
1451 mt->mcs_mt = intel_miptree_create(brw,
1452 mt->target,
1453 format,
1454 mt->first_level,
1455 mt->last_level,
1456 mcs_width,
1457 mcs_height,
1458 mt->logical_depth0,
1459 0 /* num_samples */,
1460 layout_flags);
1461
1462 return mt->mcs_mt;
1463 }
1464
1465
1466 /**
1467 * Helper for intel_miptree_alloc_hiz() that sets
1468 * \c mt->level[level].has_hiz. Return true if and only if
1469 * \c has_hiz was set.
1470 */
1471 static bool
1472 intel_miptree_level_enable_hiz(struct brw_context *brw,
1473 struct intel_mipmap_tree *mt,
1474 uint32_t level)
1475 {
1476 assert(mt->hiz_buf);
1477
1478 if (brw->gen >= 8 || brw->is_haswell) {
1479 uint32_t width = minify(mt->physical_width0, level);
1480 uint32_t height = minify(mt->physical_height0, level);
1481
1482 /* Disable HiZ for LOD > 0 unless the width is 8 aligned
1483 * and the height is 4 aligned. This allows our HiZ support
1484 * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
1485 * we can grow the width & height to allow the HiZ op to
1486 * force the proper size alignments.
1487 */
1488 if (level > 0 && ((width & 7) || (height & 3))) {
1489 DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
1490 return false;
1491 }
1492 }
1493
1494 DBG("mt %p level %d: HiZ enabled\n", mt, level);
1495 mt->level[level].has_hiz = true;
1496 return true;
1497 }
1498
1499
1500 /**
1501 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
1502 * buffer dimensions and allocates a bo for the hiz buffer.
1503 */
1504 static struct intel_miptree_aux_buffer *
1505 intel_gen7_hiz_buf_create(struct brw_context *brw,
1506 struct intel_mipmap_tree *mt)
1507 {
1508 unsigned z_width = mt->logical_width0;
1509 unsigned z_height = mt->logical_height0;
1510 const unsigned z_depth = MAX2(mt->logical_depth0, 1);
1511 unsigned hz_width, hz_height;
1512 struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
1513
1514 if (!buf)
1515 return NULL;
1516
1517 /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
1518 * adjustments required for Z_Height and Z_Width based on multisampling.
1519 */
1520 if (brw->gen < 9) {
1521 switch (mt->num_samples) {
1522 case 0:
1523 case 1:
1524 break;
1525 case 2:
1526 case 4:
1527 z_width *= 2;
1528 z_height *= 2;
1529 break;
1530 case 8:
1531 z_width *= 4;
1532 z_height *= 2;
1533 break;
1534 default:
1535 unreachable("unsupported sample count");
1536 }
1537 }
1538
1539 const unsigned vertical_align = 8; /* 'j' in the docs */
1540 const unsigned H0 = z_height;
1541 const unsigned h0 = ALIGN(H0, vertical_align);
1542 const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
1543 const unsigned Z0 = z_depth;
1544
1545 /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
1546 hz_width = ALIGN(z_width, 16);
1547
1548 if (mt->target == GL_TEXTURE_3D) {
1549 unsigned H_i = H0;
1550 unsigned Z_i = Z0;
1551 hz_height = 0;
1552 for (int level = mt->first_level; level <= mt->last_level; ++level) {
1553 unsigned h_i = ALIGN(H_i, vertical_align);
1554 /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1555 hz_height += h_i * Z_i;
1556 H_i = minify(H_i, 1);
1557 Z_i = minify(Z_i, 1);
1558 }
1559 /* HZ_Height =
1560 * (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
1561 */
1562 hz_height = DIV_ROUND_UP(hz_height, 2);
1563 } else {
1564 const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
1565 if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
1566 mt->target == GL_TEXTURE_CUBE_MAP) {
1567 /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
1568 hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
1569 } else {
1570 /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
1571 hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
1572 }
1573 }
1574
1575 unsigned long pitch;
1576 uint32_t tiling = I915_TILING_Y;
1577 buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
1578 hz_width, hz_height, 1,
1579 &tiling, &pitch,
1580 BO_ALLOC_FOR_RENDER);
1581 if (!buf->bo) {
1582 free(buf);
1583 return NULL;
1584 } else if (tiling != I915_TILING_Y) {
1585 drm_intel_bo_unreference(buf->bo);
1586 free(buf);
1587 return NULL;
1588 }
1589
1590 buf->pitch = pitch;
1591
1592 return buf;
1593 }
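/* Worked example (illustrative, hypothetical dimensions): a single-sampled,
 * non-mipmapped 256x128 2D depth miptree has h0 = 128, h1 = 64 and
 * hz_qpitch = 128 + 64 + 12 * 8 = 288, giving hz_width = ALIGN(256, 16) =
 * 256 and hz_height = DIV_ROUND_UP(288 * 1, 2 * 8) * 8 = 144 rows.
 */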
1594
1595
1596 /**
1597 * Helper for intel_miptree_alloc_hiz() that determines the required hiz
1598 * buffer dimensions and allocates a bo for the hiz buffer.
1599 */
1600 static struct intel_miptree_aux_buffer *
1601 intel_gen8_hiz_buf_create(struct brw_context *brw,
1602 struct intel_mipmap_tree *mt)
1603 {
1604 unsigned z_width = mt->logical_width0;
1605 unsigned z_height = mt->logical_height0;
1606 const unsigned z_depth = MAX2(mt->logical_depth0, 1);
1607 unsigned hz_width, hz_height;
1608 struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
1609
1610 if (!buf)
1611 return NULL;
1612
1613 /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
1614 * adjustments required for Z_Height and Z_Width based on multisampling.
1615 */
1616 switch (mt->num_samples) {
1617 case 0:
1618 case 1:
1619 break;
1620 case 2:
1621 case 4:
1622 z_width *= 2;
1623 z_height *= 2;
1624 break;
1625 case 8:
1626 z_width *= 4;
1627 z_height *= 2;
1628 break;
1629 default:
1630 unreachable("unsupported sample count");
1631 }
1632
1633 const unsigned vertical_align = 8; /* 'j' in the docs */
1634 const unsigned H0 = z_height;
1635 const unsigned h0 = ALIGN(H0, vertical_align);
1636 const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
1637 const unsigned Z0 = z_depth;
1638
1639 /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
1640 hz_width = ALIGN(z_width, 16);
1641
1642 unsigned H_i = H0;
1643 unsigned Z_i = Z0;
1644 unsigned sum_h_i = 0;
1645 unsigned hz_height_3d_sum = 0;
1646 for (int level = mt->first_level; level <= mt->last_level; ++level) {
1647 unsigned i = level - mt->first_level;
1648 unsigned h_i = ALIGN(H_i, vertical_align);
1649 /* sum(i=2 to m; h_i) */
1650 if (i >= 2) {
1651 sum_h_i += h_i;
1652 }
1653 /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1654 hz_height_3d_sum += h_i * Z_i;
1655 H_i = minify(H_i, 1);
1656 Z_i = minify(Z_i, 1);
1657 }
1658 /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
1659 buf->qpitch = h0 + MAX2(h1, sum_h_i);
1660
1661 if (mt->target == GL_TEXTURE_3D) {
1662 /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
1663 hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
1664 } else {
1665 /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
1666 hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
1667 if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
1668 mt->target == GL_TEXTURE_CUBE_MAP) {
1669 /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
1670 *
1671 * We can just take our hz_height calculation from above, and
1672 * multiply by 6 for the cube map and cube map array types.
1673 */
1674 hz_height *= 6;
1675 }
1676 }
1677
1678 unsigned long pitch;
1679 uint32_t tiling = I915_TILING_Y;
1680 buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
1681 hz_width, hz_height, 1,
1682 &tiling, &pitch,
1683 BO_ALLOC_FOR_RENDER);
1684 if (!buf->bo) {
1685 free(buf);
1686 return NULL;
1687 } else if (tiling != I915_TILING_Y) {
1688 drm_intel_bo_unreference(buf->bo);
1689 free(buf);
1690 return NULL;
1691 }
1692
1693 buf->pitch = pitch;
1694
1695 return buf;
1696 }
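/* Worked example (illustrative, hypothetical dimensions): the same
 * single-sampled, non-mipmapped 256x128 2D depth miptree on Gen8 has
 * h0 = 128, h1 = 64 and sum_h_i = 0 (there are no levels i >= 2), so
 * buf->qpitch = 128 + MAX2(64, 0) = 192 and
 * hz_height = DIV_ROUND_UP(192, 2 * 8) * 8 * 1 = 96 rows.
 */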
1697
1698
1699 static struct intel_miptree_aux_buffer *
1700 intel_hiz_miptree_buf_create(struct brw_context *brw,
1701 struct intel_mipmap_tree *mt)
1702 {
1703 struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
1704 uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
1705
1706 if (brw->gen == 6)
1707 layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;
1708
1709 if (!buf)
1710 return NULL;
1711
1712 layout_flags |= MIPTREE_LAYOUT_ALLOC_ANY_TILED;
1713 buf->mt = intel_miptree_create(brw,
1714 mt->target,
1715 mt->format,
1716 mt->first_level,
1717 mt->last_level,
1718 mt->logical_width0,
1719 mt->logical_height0,
1720 mt->logical_depth0,
1721 mt->num_samples,
1722 layout_flags);
1723 if (!buf->mt) {
1724 free(buf);
1725 return NULL;
1726 }
1727
1728 buf->bo = buf->mt->bo;
1729 buf->pitch = buf->mt->pitch;
1730 buf->qpitch = buf->mt->qpitch;
1731
1732 return buf;
1733 }
1734
1735 bool
1736 intel_miptree_wants_hiz_buffer(struct brw_context *brw,
1737 struct intel_mipmap_tree *mt)
1738 {
1739 if (!brw->has_hiz)
1740 return false;
1741
1742 if (mt->hiz_buf != NULL)
1743 return false;
1744
1745 if (mt->disable_aux_buffers)
1746 return false;
1747
1748 switch (mt->format) {
1749 case MESA_FORMAT_Z_FLOAT32:
1750 case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
1751 case MESA_FORMAT_Z24_UNORM_X8_UINT:
1752 case MESA_FORMAT_Z24_UNORM_S8_UINT:
1753 case MESA_FORMAT_Z_UNORM16:
1754 return true;
1755 default:
1756 return false;
1757 }
1758 }
1759
1760 bool
1761 intel_miptree_alloc_hiz(struct brw_context *brw,
1762 struct intel_mipmap_tree *mt)
1763 {
1764 assert(mt->hiz_buf == NULL);
1765 assert(!mt->disable_aux_buffers);
1766
1767 if (brw->gen == 7) {
1768 mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
1769 } else if (brw->gen >= 8) {
1770 mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
1771 } else {
1772 mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
1773 }
1774
1775 if (!mt->hiz_buf)
1776 return false;
1777
1778 /* Mark that all slices need a HiZ resolve. */
1779 for (int level = mt->first_level; level <= mt->last_level; ++level) {
1780 if (!intel_miptree_level_enable_hiz(brw, mt, level))
1781 continue;
1782
1783 for (int layer = 0; layer < mt->level[level].depth; ++layer) {
1784 struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
1785 exec_node_init(&m->link);
1786 m->level = level;
1787 m->layer = layer;
1788 m->need = GEN6_HIZ_OP_HIZ_RESOLVE;
1789
1790 exec_list_push_tail(&mt->hiz_map, &m->link);
1791 }
1792 }
1793
1794 return true;
1795 }
1796
1797 /**
1798 * Does the miptree slice have hiz enabled?
1799 */
1800 bool
1801 intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
1802 {
1803 intel_miptree_check_level_layer(mt, level, 0);
1804 return mt->level[level].has_hiz;
1805 }
1806
1807 void
1808 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
1809 uint32_t level,
1810 uint32_t layer)
1811 {
1812 if (!intel_miptree_level_has_hiz(mt, level))
1813 return;
1814
1815 intel_resolve_map_set(&mt->hiz_map,
1816 level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
1817 }
1818
1819
1820 void
1821 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
1822 uint32_t level,
1823 uint32_t layer)
1824 {
1825 if (!intel_miptree_level_has_hiz(mt, level))
1826 return;
1827
1828 intel_resolve_map_set(&mt->hiz_map,
1829 level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
1830 }
1831
1832 void
1833 intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
1834 uint32_t level)
1835 {
1836 uint32_t layer;
1837 uint32_t end_layer = mt->level[level].depth;
1838
1839 for (layer = 0; layer < end_layer; layer++) {
1840 intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
1841 }
1842 }
1843
1844 static bool
1845 intel_miptree_slice_resolve(struct brw_context *brw,
1846 struct intel_mipmap_tree *mt,
1847 uint32_t level,
1848 uint32_t layer,
1849 enum gen6_hiz_op need)
1850 {
1851 intel_miptree_check_level_layer(mt, level, layer);
1852
1853 struct intel_resolve_map *item =
1854 intel_resolve_map_get(&mt->hiz_map, level, layer);
1855
1856 if (!item || item->need != need)
1857 return false;
1858
1859 intel_hiz_exec(brw, mt, level, layer, need);
1860 intel_resolve_map_remove(item);
1861 return true;
1862 }
1863
1864 bool
1865 intel_miptree_slice_resolve_hiz(struct brw_context *brw,
1866 struct intel_mipmap_tree *mt,
1867 uint32_t level,
1868 uint32_t layer)
1869 {
1870 return intel_miptree_slice_resolve(brw, mt, level, layer,
1871 GEN6_HIZ_OP_HIZ_RESOLVE);
1872 }
1873
1874 bool
1875 intel_miptree_slice_resolve_depth(struct brw_context *brw,
1876 struct intel_mipmap_tree *mt,
1877 uint32_t level,
1878 uint32_t layer)
1879 {
1880 return intel_miptree_slice_resolve(brw, mt, level, layer,
1881 GEN6_HIZ_OP_DEPTH_RESOLVE);
1882 }
1883
1884 static bool
1885 intel_miptree_all_slices_resolve(struct brw_context *brw,
1886 struct intel_mipmap_tree *mt,
1887 enum gen6_hiz_op need)
1888 {
1889 bool did_resolve = false;
1890
1891 foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
1892 if (map->need != need)
1893 continue;
1894
1895 intel_hiz_exec(brw, mt, map->level, map->layer, need);
1896 intel_resolve_map_remove(map);
1897 did_resolve = true;
1898 }
1899
1900 return did_resolve;
1901 }
1902
1903 bool
1904 intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
1905 struct intel_mipmap_tree *mt)
1906 {
1907 return intel_miptree_all_slices_resolve(brw, mt,
1908 GEN6_HIZ_OP_HIZ_RESOLVE);
1909 }
1910
1911 bool
1912 intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
1913 struct intel_mipmap_tree *mt)
1914 {
1915 return intel_miptree_all_slices_resolve(brw, mt,
1916 GEN6_HIZ_OP_DEPTH_RESOLVE);
1917 }
1918
1919
1920 void
1921 intel_miptree_resolve_color(struct brw_context *brw,
1922 struct intel_mipmap_tree *mt)
1923 {
1924 switch (mt->fast_clear_state) {
1925 case INTEL_FAST_CLEAR_STATE_NO_MCS:
1926 case INTEL_FAST_CLEAR_STATE_RESOLVED:
1927 /* No resolve needed */
1928 break;
1929 case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
1930 case INTEL_FAST_CLEAR_STATE_CLEAR:
1931 /* Fast color clear resolves only make sense for non-MSAA buffers. */
1932 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
1933 brw_meta_resolve_color(brw, mt);
1934 break;
1935 }
1936 }
1937
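/* Summary of the transitions above (added for clarity; that
 * brw_meta_resolve_color() leaves the state RESOLVED is an assumption about
 * its implementation):
 *
 *   NO_MCS, RESOLVED              -> nothing to resolve
 *   UNRESOLVED, CLEAR (non-MSAA)  -> brw_meta_resolve_color() folds the
 *                                    fast-clear color back into the buffer
 */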
1938
1939 /**
1940 * Make it possible to share the BO backing the given miptree with another
1941 * process or another miptree.
1942 *
1943 * Fast color clears are unsafe with shared buffers, so we need to resolve and
1944 * then discard the MCS buffer, if present. We also set the fast_clear_state
1945 * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
1946 * allocated in the future.
1947 */
1948 void
1949 intel_miptree_make_shareable(struct brw_context *brw,
1950 struct intel_mipmap_tree *mt)
1951 {
1952 /* MCS buffers are also used for multisample buffers, but we can't resolve
1953 * away a multisample MCS buffer because it's an integral part of how the
1954 * pixel data is stored. Fortunately this code path should never be
1955 * reached for multisample buffers.
1956 */
1957 assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
1958
1959 if (mt->mcs_mt) {
1960 intel_miptree_resolve_color(brw, mt);
1961 intel_miptree_release(&mt->mcs_mt);
1962 mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
1963 }
1964 }
1965
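/* A hedged sketch (the export call is an assumption about the caller, not
 * part of this file): making the miptree shareable typically happens right
 * before handing the BO to another process.
 */
#if 0
   int prime_fd;

   intel_miptree_make_shareable(brw, mt);
   drm_intel_bo_gem_export_to_prime(mt->bo, &prime_fd);
#endif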
1966
1967 /**
1968 * \brief Get pointer offset into stencil buffer.
1969 *
1970 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
1971 * must decode the tile's layout in software.
1972 *
1973 * See
1974 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
1975 * Format.
1976 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
1977 *
1978 * Even though the returned offset is always positive, the return type is
1979 * signed due to
1980 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
1981 * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
1982 */
1983 static intptr_t
1984 intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
1985 {
1986 uint32_t tile_size = 4096;
1987 uint32_t tile_width = 64;
1988 uint32_t tile_height = 64;
1989 uint32_t row_size = 64 * stride;
1990
1991 uint32_t tile_x = x / tile_width;
1992 uint32_t tile_y = y / tile_height;
1993
1994 /* The byte's address relative to the tile's base address. */
1995 uint32_t byte_x = x % tile_width;
1996 uint32_t byte_y = y % tile_height;
1997
1998 uintptr_t u = tile_y * row_size
1999 + tile_x * tile_size
2000 + 512 * (byte_x / 8)
2001 + 64 * (byte_y / 8)
2002 + 32 * ((byte_y / 4) % 2)
2003 + 16 * ((byte_x / 4) % 2)
2004 + 8 * ((byte_y / 2) % 2)
2005 + 4 * ((byte_x / 2) % 2)
2006 + 2 * (byte_y % 2)
2007 + 1 * (byte_x % 2);
2008
2009 if (swizzled) {
2010 /* adjust for bit6 swizzling */
2011 if (((byte_x / 8) % 2) == 1) {
2012 if (((byte_y / 8) % 2) == 0) {
2013 u += 64;
2014 } else {
2015 u -= 64;
2016 }
2017 }
2018 }
2019
2020 return u;
2021 }
2022
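/* A worked example of the computation above (added for clarity): with
 * stride = 128, x = 70, y = 3 and no swizzling, we get row_size = 8192,
 * tile_x = 1, tile_y = 0, byte_x = 6, byte_y = 3, so
 *
 *   u = 0 * 8192 + 1 * 4096 + 512 * 0 + 64 * 0 + 32 * 0
 *       + 16 * 1 + 8 * 1 + 4 * 1 + 2 * 1 + 1 * 0 = 4126
 */
#if 0
   assert(intel_offset_S8(128, 70, 3, false) == 4126);
#endif
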
2023 void
2024 intel_miptree_updownsample(struct brw_context *brw,
2025 struct intel_mipmap_tree *src,
2026 struct intel_mipmap_tree *dst)
2027 {
2028 if (brw->gen < 8) {
2029 brw_blorp_blit_miptrees(brw,
2030 src, 0 /* level */, 0 /* layer */, src->format,
2031 dst, 0 /* level */, 0 /* layer */, dst->format,
2032 0, 0,
2033 src->logical_width0, src->logical_height0,
2034 0, 0,
2035 dst->logical_width0, dst->logical_height0,
2036 GL_NEAREST, false, false /*mirror x, y*/);
2037 } else if (src->format == MESA_FORMAT_S_UINT8) {
2038 brw_meta_stencil_updownsample(brw, src, dst);
2039 } else {
2040 brw_meta_updownsample(brw, src, dst);
2041 }
2042
2043 if (src->stencil_mt) {
2044 if (brw->gen >= 8) {
2045 brw_meta_stencil_updownsample(brw, src->stencil_mt, dst);
2046 return;
2047 }
2048
2049 brw_blorp_blit_miptrees(brw,
2050 src->stencil_mt, 0 /* level */, 0 /* layer */,
2051 src->stencil_mt->format,
2052 dst->stencil_mt, 0 /* level */, 0 /* layer */,
2053 dst->stencil_mt->format,
2054 0, 0,
2055 src->logical_width0, src->logical_height0,
2056 0, 0,
2057 dst->logical_width0, dst->logical_height0,
2058 GL_NEAREST, false, false /*mirror x, y*/);
2059 }
2060 }
2061
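/* A hedged usage sketch (assumes the miptree has a single-sample
 * counterpart, as window-system MSAA buffers do): downsampling before the
 * buffer is presented.
 */
#if 0
   intel_miptree_updownsample(brw, mt, mt->singlesample_mt);
#endif
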
2062 void *
2063 intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
2064 {
2065 /* CPU accesses to color buffers don't understand fast color clears, so
2066 * resolve any pending fast color clears before we map.
2067 */
2068 intel_miptree_resolve_color(brw, mt);
2069
2070 drm_intel_bo *bo = mt->bo;
2071
2072 if (drm_intel_bo_references(brw->batch.bo, bo))
2073 intel_batchbuffer_flush(brw);
2074
2075 if (mt->tiling != I915_TILING_NONE)
2076 brw_bo_map_gtt(brw, bo, "miptree");
2077 else
2078 brw_bo_map(brw, bo, true, "miptree");
2079
2080 return bo->virtual;
2081 }
2082
2083 void
2084 intel_miptree_unmap_raw(struct brw_context *brw,
2085 struct intel_mipmap_tree *mt)
2086 {
2087 drm_intel_bo_unmap(mt->bo);
2088 }
2089
2090 static void
2091 intel_miptree_map_gtt(struct brw_context *brw,
2092 struct intel_mipmap_tree *mt,
2093 struct intel_miptree_map *map,
2094 unsigned int level, unsigned int slice)
2095 {
2096 unsigned int bw, bh;
2097 void *base;
2098 unsigned int image_x, image_y;
2099 intptr_t x = map->x;
2100 intptr_t y = map->y;
2101
2102 /* For compressed formats, the stride is the number of bytes per
2103 * row of blocks. intel_miptree_get_image_offset() already does
2104 * the divide.
2105 */
2106 _mesa_get_format_block_size(mt->format, &bw, &bh);
2107 assert(y % bh == 0);
2108 y /= bh;
2109
2110 base = intel_miptree_map_raw(brw, mt); /* add mt->offset only after the NULL check below */
2111
2112 if (base == NULL)
2113 map->ptr = NULL;
2114 else {
2115 /* Note that in the case of cube maps, the caller must have passed the
2116 * slice number referencing the face.
2117 */
2118 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2119 x += image_x;
2120 y += image_y;
2121
2122 map->stride = mt->pitch;
2123 map->ptr = base + mt->offset + y * map->stride + x * mt->cpp;
2124 }
2125
2126 DBG("%s: %d,%d %dx%d from mt %p (%s) "
2127 "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__,
2128 map->x, map->y, map->w, map->h,
2129 mt, _mesa_get_format_name(mt->format),
2130 x, y, map->ptr, map->stride);
2131 }
2132
2133 static void
2134 intel_miptree_unmap_gtt(struct brw_context *brw,
2135 struct intel_mipmap_tree *mt,
2136 struct intel_miptree_map *map,
2137 unsigned int level,
2138 unsigned int slice)
2139 {
2140 intel_miptree_unmap_raw(brw, mt);
2141 }
2142
2143 static void
2144 intel_miptree_map_blit(struct brw_context *brw,
2145 struct intel_mipmap_tree *mt,
2146 struct intel_miptree_map *map,
2147 unsigned int level, unsigned int slice)
2148 {
2149 map->mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
2150 0, 0,
2151 map->w, map->h, 1,
2152 0, 0);
2153
2154 if (!map->mt) {
2155 fprintf(stderr, "Failed to allocate blit temporary\n");
2156 goto fail;
2157 }
2158 map->stride = map->mt->pitch;
2159
2160 /* At least one of READ_BIT and WRITE_BIT is set. READ_BIT implies no
2161 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2162 * invalidate is set, since we'll be writing the whole rectangle from our
2163 * temporary buffer back out.
2164 */
2165 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2166 if (!intel_miptree_blit(brw,
2167 mt, level, slice,
2168 map->x, map->y, false,
2169 map->mt, 0, 0,
2170 0, 0, false,
2171 map->w, map->h, GL_COPY)) {
2172 fprintf(stderr, "Failed to blit\n");
2173 goto fail;
2174 }
2175 }
2176
2177 map->ptr = intel_miptree_map_raw(brw, map->mt);
2178
2179 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
2180 map->x, map->y, map->w, map->h,
2181 mt, _mesa_get_format_name(mt->format),
2182 level, slice, map->ptr, map->stride);
2183
2184 return;
2185
2186 fail:
2187 intel_miptree_release(&map->mt);
2188 map->ptr = NULL;
2189 map->stride = 0;
2190 }
2191
2192 static void
2193 intel_miptree_unmap_blit(struct brw_context *brw,
2194 struct intel_mipmap_tree *mt,
2195 struct intel_miptree_map *map,
2196 unsigned int level,
2197 unsigned int slice)
2198 {
2199 struct gl_context *ctx = &brw->ctx;
2200
2201 intel_miptree_unmap_raw(brw, map->mt);
2202
2203 if (map->mode & GL_MAP_WRITE_BIT) {
2204 bool ok = intel_miptree_blit(brw,
2205 map->mt, 0, 0,
2206 0, 0, false,
2207 mt, level, slice,
2208 map->x, map->y, false,
2209 map->w, map->h, GL_COPY);
2210 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
2211 }
2212
2213 intel_miptree_release(&map->mt);
2214 }
2215
2216 /**
2217 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
2218 */
2219 #if defined(USE_SSE41)
2220 static void
2221 intel_miptree_map_movntdqa(struct brw_context *brw,
2222 struct intel_mipmap_tree *mt,
2223 struct intel_miptree_map *map,
2224 unsigned int level, unsigned int slice)
2225 {
2226 assert(map->mode & GL_MAP_READ_BIT);
2227 assert(!(map->mode & GL_MAP_WRITE_BIT));
2228
2229 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
2230 map->x, map->y, map->w, map->h,
2231 mt, _mesa_get_format_name(mt->format),
2232 level, slice, map->ptr, map->stride);
2233
2234 /* Map the original image */
2235 uint32_t image_x;
2236 uint32_t image_y;
2237 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2238 image_x += map->x;
2239 image_y += map->y;
2240
2241 void *src = intel_miptree_map_raw(brw, mt);
2242 if (!src)
2243 return;
2244 src += image_y * mt->pitch;
2245 src += image_x * mt->cpp;
2246
2247 /* Due to the pixel offsets for the particular image being mapped, our
2248 * src pointer may not be 16-byte aligned. However, if the pitch is
2249 * divisible by 16, then the amount by which it's misaligned will remain
2250 * consistent from row to row.
2251 */
2252 assert((mt->pitch % 16) == 0);
2253 const int misalignment = ((uintptr_t) src) & 15;
2254
2255 /* Create an untiled temporary buffer for the mapping. */
2256 const unsigned width_bytes = _mesa_format_row_stride(mt->format, map->w);
2257
2258 map->stride = ALIGN(misalignment + width_bytes, 16);
2259
2260 map->buffer = _mesa_align_malloc(map->stride * map->h, 16);
if (map->buffer == NULL) {
intel_miptree_unmap_raw(brw, mt);
return;
}
2261 /* Offset the destination so it has the same misalignment as src. */
2262 map->ptr = map->buffer + misalignment;
2263
2264 assert((((uintptr_t) map->ptr) & 15) == misalignment);
2265
2266 for (uint32_t y = 0; y < map->h; y++) {
2267 void *dst_ptr = map->ptr + y * map->stride;
2268 void *src_ptr = src + y * mt->pitch;
2269
2270 _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes);
2271 }
2272
2273 intel_miptree_unmap_raw(brw, mt);
2274 }
2275
2276 static void
2277 intel_miptree_unmap_movntdqa(struct brw_context *brw,
2278 struct intel_mipmap_tree *mt,
2279 struct intel_miptree_map *map,
2280 unsigned int level,
2281 unsigned int slice)
2282 {
2283 _mesa_align_free(map->buffer);
2284 map->buffer = NULL;
2285 map->ptr = NULL;
2286 }
2287 #endif
2288
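/* An illustration (added; not in the original source) of why the
 * misalignment in intel_miptree_map_movntdqa() is the same on every row:
 * with pitch % 16 == 0, stepping down a row adds a multiple of 16 bytes,
 * which leaves the low four address bits, and hence the 16-byte
 * (mis)alignment, unchanged.
 */
#if 0
   const uintptr_t row0 = (uintptr_t) src;
   const uintptr_t row1 = row0 + mt->pitch;   /* mt->pitch % 16 == 0 */
   assert((row0 & 15) == (row1 & 15));
#endif
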
2289 static void
2290 intel_miptree_map_s8(struct brw_context *brw,
2291 struct intel_mipmap_tree *mt,
2292 struct intel_miptree_map *map,
2293 unsigned int level, unsigned int slice)
2294 {
2295 map->stride = map->w;
2296 map->buffer = map->ptr = malloc(map->stride * map->h);
2297 if (!map->buffer)
2298 return;
2299
2300 /* At least one of READ_BIT and WRITE_BIT is set. READ_BIT implies no
2301 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2302 * invalidate is set, since we'll be writing the whole rectangle from our
2303 * temporary buffer back out.
2304 */
2305 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2306 uint8_t *untiled_s8_map = map->ptr;
2307 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
2308 unsigned int image_x, image_y;
2309
2310 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2311
2312 for (uint32_t y = 0; y < map->h; y++) {
2313 for (uint32_t x = 0; x < map->w; x++) {
2314 ptrdiff_t offset = intel_offset_S8(mt->pitch,
2315 x + image_x + map->x,
2316 y + image_y + map->y,
2317 brw->has_swizzling);
2318 untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
2319 }
2320 }
2321
2322 intel_miptree_unmap_raw(brw, mt);
2323
2324 DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__,
2325 map->x, map->y, map->w, map->h,
2326 mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
2327 } else {
2328 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
2329 map->x, map->y, map->w, map->h,
2330 mt, map->ptr, map->stride);
2331 }
2332 }
2333
2334 static void
2335 intel_miptree_unmap_s8(struct brw_context *brw,
2336 struct intel_mipmap_tree *mt,
2337 struct intel_miptree_map *map,
2338 unsigned int level,
2339 unsigned int slice)
2340 {
2341 if (map->mode & GL_MAP_WRITE_BIT) {
2342 unsigned int image_x, image_y;
2343 uint8_t *untiled_s8_map = map->ptr;
2344 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
2345
2346 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2347
2348 for (uint32_t y = 0; y < map->h; y++) {
2349 for (uint32_t x = 0; x < map->w; x++) {
2350 ptrdiff_t offset = intel_offset_S8(mt->pitch,
2351 x + image_x + map->x,
2352 y + image_y + map->y,
2353 brw->has_swizzling);
2354 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
2355 }
2356 }
2357
2358 intel_miptree_unmap_raw(brw, mt);
2359 }
2360
2361 free(map->buffer);
2362 }
2363
2364 static void
2365 intel_miptree_map_etc(struct brw_context *brw,
2366 struct intel_mipmap_tree *mt,
2367 struct intel_miptree_map *map,
2368 unsigned int level,
2369 unsigned int slice)
2370 {
2371 assert(mt->etc_format != MESA_FORMAT_NONE);
2372 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
2373 assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
2374 }
2375
2376 assert(map->mode & GL_MAP_WRITE_BIT);
2377 assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
2378
2379 map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
2380 map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
2381 map->w, map->h, 1));
2382 map->ptr = map->buffer;
2383 }
2384
2385 static void
2386 intel_miptree_unmap_etc(struct brw_context *brw,
2387 struct intel_mipmap_tree *mt,
2388 struct intel_miptree_map *map,
2389 unsigned int level,
2390 unsigned int slice)
2391 {
2392 uint32_t image_x;
2393 uint32_t image_y;
2394 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2395
2396 image_x += map->x;
2397 image_y += map->y;
2398
2399 uint8_t *dst = intel_miptree_map_raw(brw, mt)
2400 + image_y * mt->pitch
2401 + image_x * mt->cpp;
2402
2403 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
2404 _mesa_etc1_unpack_rgba8888(dst, mt->pitch,
2405 map->ptr, map->stride,
2406 map->w, map->h);
2407 else
2408 _mesa_unpack_etc2_format(dst, mt->pitch,
2409 map->ptr, map->stride,
2410 map->w, map->h, mt->etc_format);
2411
2412 intel_miptree_unmap_raw(brw, mt);
2413 free(map->buffer);
2414 }
2415
2416 /**
2417 * Mapping function for packed depth/stencil miptrees backed by real separate
2418 * miptrees for depth and stencil.
2419 *
2420 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
2421 * separate from the depth buffer. Yet at the GL API level, we have to expose
2422 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
2423 * be able to map that memory for texture storage and glReadPixels-type
2424 * operations. We give Mesa core that access by mallocing a temporary and
2425 * copying the data between the actual backing store and the temporary.
2426 */
2427 static void
2428 intel_miptree_map_depthstencil(struct brw_context *brw,
2429 struct intel_mipmap_tree *mt,
2430 struct intel_miptree_map *map,
2431 unsigned int level, unsigned int slice)
2432 {
2433 struct intel_mipmap_tree *z_mt = mt;
2434 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
2435 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
2436 int packed_bpp = map_z32f_x24s8 ? 8 : 4;
2437
2438 map->stride = map->w * packed_bpp;
2439 map->buffer = map->ptr = malloc(map->stride * map->h);
2440 if (!map->buffer)
2441 return;
2442
2443 /* At least one of READ_BIT and WRITE_BIT is set. READ_BIT implies no
2444 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
2445 * invalidate is set, since we'll be writing the whole rectangle from our
2446 * temporary buffer back out.
2447 */
2448 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
2449 uint32_t *packed_map = map->ptr;
2450 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
2451 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
2452 unsigned int s_image_x, s_image_y;
2453 unsigned int z_image_x, z_image_y;
2454
2455 intel_miptree_get_image_offset(s_mt, level, slice,
2456 &s_image_x, &s_image_y);
2457 intel_miptree_get_image_offset(z_mt, level, slice,
2458 &z_image_x, &z_image_y);
2459
2460 for (uint32_t y = 0; y < map->h; y++) {
2461 for (uint32_t x = 0; x < map->w; x++) {
2462 int map_x = map->x + x, map_y = map->y + y;
2463 ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
2464 map_x + s_image_x,
2465 map_y + s_image_y,
2466 brw->has_swizzling);
2467 ptrdiff_t z_offset = ((map_y + z_image_y) *
2468 (z_mt->pitch / 4) +
2469 (map_x + z_image_x));
2470 uint8_t s = s_map[s_offset];
2471 uint32_t z = z_map[z_offset];
2472
2473 if (map_z32f_x24s8) {
2474 packed_map[(y * map->w + x) * 2 + 0] = z;
2475 packed_map[(y * map->w + x) * 2 + 1] = s;
2476 } else {
2477 packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
2478 }
2479 }
2480 }
2481
2482 intel_miptree_unmap_raw(brw, s_mt);
2483 intel_miptree_unmap_raw(brw, z_mt);
2484
2485 DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
2486 __func__,
2487 map->x, map->y, map->w, map->h,
2488 z_mt, map->x + z_image_x, map->y + z_image_y,
2489 s_mt, map->x + s_image_x, map->y + s_image_y,
2490 map->ptr, map->stride);
2491 } else {
2492 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
2493 map->x, map->y, map->w, map->h,
2494 mt, map->ptr, map->stride);
2495 }
2496 }
2497
2498 static void
2499 intel_miptree_unmap_depthstencil(struct brw_context *brw,
2500 struct intel_mipmap_tree *mt,
2501 struct intel_miptree_map *map,
2502 unsigned int level,
2503 unsigned int slice)
2504 {
2505 struct intel_mipmap_tree *z_mt = mt;
2506 struct intel_mipmap_tree *s_mt = mt->stencil_mt;
2507 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
2508
2509 if (map->mode & GL_MAP_WRITE_BIT) {
2510 uint32_t *packed_map = map->ptr;
2511 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
2512 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
2513 unsigned int s_image_x, s_image_y;
2514 unsigned int z_image_x, z_image_y;
2515
2516 intel_miptree_get_image_offset(s_mt, level, slice,
2517 &s_image_x, &s_image_y);
2518 intel_miptree_get_image_offset(z_mt, level, slice,
2519 &z_image_x, &z_image_y);
2520
2521 for (uint32_t y = 0; y < map->h; y++) {
2522 for (uint32_t x = 0; x < map->w; x++) {
2523 ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
2524 x + s_image_x + map->x,
2525 y + s_image_y + map->y,
2526 brw->has_swizzling);
2527 ptrdiff_t z_offset = ((y + z_image_y + map->y) *
2528 (z_mt->pitch / 4) +
2529 (x + z_image_x + map->x));
2530
2531 if (map_z32f_x24s8) {
2532 z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
2533 s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
2534 } else {
2535 uint32_t packed = packed_map[y * map->w + x];
2536 s_map[s_offset] = packed >> 24;
2537 z_map[z_offset] = packed;
2538 }
2539 }
2540 }
2541
2542 intel_miptree_unmap_raw(brw, s_mt);
2543 intel_miptree_unmap_raw(brw, z_mt);
2544
2545 DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
2546 __func__,
2547 map->x, map->y, map->w, map->h,
2548 z_mt, _mesa_get_format_name(z_mt->format),
2549 map->x + z_image_x, map->y + z_image_y,
2550 s_mt, map->x + s_image_x, map->y + s_image_y,
2551 map->ptr, map->stride);
2552 }
2553
2554 free(map->buffer);
2555 }
2556
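/* For reference (added; this follows from the packing code above), the
 * temporary buffer holds one of two texel layouts:
 *
 *   Z24S8 case, one 32-bit word per texel:
 *
 *     31        24 23                       0
 *     +-----------+-------------------------+
 *     |  stencil  |       depth (Z24)       |
 *     +-----------+-------------------------+
 *
 *   Z32F_S8 case, two 32-bit words per texel: the float depth value,
 *   then the stencil value in the low byte of the second word.
 */
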
2557 /**
2558 * Create and attach a map to the miptree at (level, slice). Return the
2559 * attached map.
2560 */
2561 static struct intel_miptree_map*
2562 intel_miptree_attach_map(struct intel_mipmap_tree *mt,
2563 unsigned int level,
2564 unsigned int slice,
2565 unsigned int x,
2566 unsigned int y,
2567 unsigned int w,
2568 unsigned int h,
2569 GLbitfield mode)
2570 {
2571 struct intel_miptree_map *map = calloc(1, sizeof(*map));
2572
2573 if (!map)
2574 return NULL;
2575
2576 assert(mt->level[level].slice[slice].map == NULL);
2577 mt->level[level].slice[slice].map = map;
2578
2579 map->mode = mode;
2580 map->x = x;
2581 map->y = y;
2582 map->w = w;
2583 map->h = h;
2584
2585 return map;
2586 }
2587
2588 /**
2589 * Release the map at (level, slice).
2590 */
2591 static void
2592 intel_miptree_release_map(struct intel_mipmap_tree *mt,
2593 unsigned int level,
2594 unsigned int slice)
2595 {
2596 struct intel_miptree_map **map;
2597
2598 map = &mt->level[level].slice[slice].map;
2599 free(*map);
2600 *map = NULL;
2601 }
2602
2603 static bool
2604 can_blit_slice(struct intel_mipmap_tree *mt,
2605 unsigned int level, unsigned int slice)
2606 {
2607 uint32_t image_x;
2608 uint32_t image_y;
2609 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
2610 if (image_x >= 32768 || image_y >= 32768)
2611 return false;
2612
2613 /* See intel_miptree_blit() for details on the 32k pitch limit. */
2614 if (mt->pitch >= 32768)
2615 return false;
2616
2617 return true;
2618 }
2619
2620 static bool
2621 use_intel_miptree_map_blit(struct brw_context *brw,
2622 struct intel_mipmap_tree *mt,
2623 GLbitfield mode,
2624 unsigned int level,
2625 unsigned int slice)
2626 {
2627 if (brw->has_llc &&
2628 /* It's probably not worth swapping to the blit ring because of
2629 * all the overhead involved.
2630 */
2631 !(mode & GL_MAP_WRITE_BIT) &&
2632 !mt->compressed &&
2633 (mt->tiling == I915_TILING_X ||
2634 /* Prior to Sandybridge, the blitter can't handle Y tiling */
2635 (brw->gen >= 6 && mt->tiling == I915_TILING_Y)) &&
2636 can_blit_slice(mt, level, slice))
2637 return true;
2638
2639 if (mt->tiling != I915_TILING_NONE &&
2640 mt->bo->size >= brw->max_gtt_map_object_size) {
2641 assert(can_blit_slice(mt, level, slice));
2642 return true;
2643 }
2644
2645 return false;
2646 }
2647
2648 /**
2649 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
2650  * exceed 32 bits, but to diminish the likelihood of subtle bugs caused by
2651  * pointer arithmetic overflow.
2652 *
2653 * If you call this function and use \a out_stride, then you're doing pointer
2654 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
2655 * bugs. The caller must still take care to avoid 32-bit overflow errors in
2656 * all arithmetic expressions that contain buffer offsets and pixel sizes,
2657 * which usually have type uint32_t or GLuint.
2658 */
2659 void
2660 intel_miptree_map(struct brw_context *brw,
2661 struct intel_mipmap_tree *mt,
2662 unsigned int level,
2663 unsigned int slice,
2664 unsigned int x,
2665 unsigned int y,
2666 unsigned int w,
2667 unsigned int h,
2668 GLbitfield mode,
2669 void **out_ptr,
2670 ptrdiff_t *out_stride)
2671 {
2672 struct intel_miptree_map *map;
2673
2674 assert(mt->num_samples <= 1);
2675
2676 map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
2677 if (!map) {
2678 *out_ptr = NULL;
2679 *out_stride = 0;
2680 return;
2681 }
2682
2683 intel_miptree_slice_resolve_depth(brw, mt, level, slice);
2684 if (map->mode & GL_MAP_WRITE_BIT) {
2685 intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
2686 }
2687
2688 if (mt->format == MESA_FORMAT_S_UINT8) {
2689 intel_miptree_map_s8(brw, mt, map, level, slice);
2690 } else if (mt->etc_format != MESA_FORMAT_NONE &&
2691 !(mode & BRW_MAP_DIRECT_BIT)) {
2692 intel_miptree_map_etc(brw, mt, map, level, slice);
2693 } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
2694 intel_miptree_map_depthstencil(brw, mt, map, level, slice);
2695 } else if (use_intel_miptree_map_blit(brw, mt, mode, level, slice)) {
2696 intel_miptree_map_blit(brw, mt, map, level, slice);
2697 #if defined(USE_SSE41)
2698 } else if (!(mode & GL_MAP_WRITE_BIT) &&
2699 !mt->compressed && cpu_has_sse4_1 &&
2700 (mt->pitch % 16 == 0)) {
2701 intel_miptree_map_movntdqa(brw, mt, map, level, slice);
2702 #endif
2703 } else {
2704 intel_miptree_map_gtt(brw, mt, map, level, slice);
2705 }
2706
2707 *out_ptr = map->ptr;
2708 *out_stride = map->stride;
2709
2710 if (map->ptr == NULL)
2711 intel_miptree_release_map(mt, level, slice);
2712 }
2713
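/* A hedged usage sketch (not part of the original file): map a slice for
 * reading, walk the rows with the ptrdiff_t stride so the pointer
 * arithmetic stays 64-bit clean, then unmap. All variables are assumed.
 */
#if 0
   void *ptr;
   ptrdiff_t stride;

   intel_miptree_map(brw, mt, 0 /* level */, 0 /* slice */,
                     0, 0, mt->logical_width0, mt->logical_height0,
                     GL_MAP_READ_BIT, &ptr, &stride);
   if (ptr != NULL) {
      for (uint32_t y = 0; y < mt->logical_height0; y++) {
         /* ptrdiff_t stride promotes the row computation to 64 bits. */
         const uint8_t *row = (const uint8_t *) ptr + (ptrdiff_t) y * stride;
         (void) row;
      }
      intel_miptree_unmap(brw, mt, 0 /* level */, 0 /* slice */);
   }
#endif
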
2714 void
2715 intel_miptree_unmap(struct brw_context *brw,
2716 struct intel_mipmap_tree *mt,
2717 unsigned int level,
2718 unsigned int slice)
2719 {
2720 struct intel_miptree_map *map = mt->level[level].slice[slice].map;
2721
2722 assert(mt->num_samples <= 1);
2723
2724 if (!map)
2725 return;
2726
2727 DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
2728 mt, _mesa_get_format_name(mt->format), level, slice);
2729
2730 if (mt->format == MESA_FORMAT_S_UINT8) {
2731 intel_miptree_unmap_s8(brw, mt, map, level, slice);
2732 } else if (mt->etc_format != MESA_FORMAT_NONE &&
2733 !(map->mode & BRW_MAP_DIRECT_BIT)) {
2734 intel_miptree_unmap_etc(brw, mt, map, level, slice);
2735 } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
2736 intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
2737 } else if (map->mt) {
2738 intel_miptree_unmap_blit(brw, mt, map, level, slice);
2739 #if defined(USE_SSE41)
2740 } else if (map->buffer && cpu_has_sse4_1) {
2741 intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
2742 #endif
2743 } else {
2744 intel_miptree_unmap_gtt(brw, mt, map, level, slice);
2745 }
2746
2747 intel_miptree_release_map(mt, level, slice);
2748 }