/*
 * Copyright 2006 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_image.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"

#include "brw_blorp.h"
#include "brw_context.h"
#include "brw_state.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/texcompress_etc.h"
#include "main/teximage.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE
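/* Note (not in the original excerpt): FILE_DEBUG_FLAG selects which
 * INTEL_DEBUG flag gates the DBG() output used throughout this file, so
 * running with the miptree debug flag enabled should make the DBG() prints
 * below visible, assuming the standard i965 debug machinery.
 */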
static void *intel_miptree_map_raw(struct brw_context *brw,
                                   struct intel_mipmap_tree *mt,
                                   GLbitfield mode);

static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);

static bool
intel_miptree_alloc_aux(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);
static bool
is_mcs_supported(const struct brw_context *brw, mesa_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (brw->gen < 7)
      return false;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return false;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written.
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers. The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
         return false;
      } else {
         return true;
      }
   }
}
static bool
intel_tiling_supports_ccs(const struct brw_context *brw,
                          enum isl_tiling tiling)
{
   /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
    * Target(s)", beneath the "Fast Color Clear" bullet (p326):
    *
    *     - Support is limited to tiled render targets.
    *
    * Gen9 changes the restriction to Y-tile only.
    */
   if (brw->gen >= 9)
      return tiling == ISL_TILING_Y0;
   else if (brw->gen >= 7)
      return tiling != ISL_TILING_LINEAR;
   else
      return false;
}
/**
 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer
 * can be used. This doesn't (and should not) inspect any of the properties of
 * the miptree's BO.
 *
 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
 * beneath the "Fast Color Clear" bullet (p326):
 *
 *     - Support is for non-mip-mapped and non-array surface types only.
 *
 * And then later, on p327:
 *
 *     - MCS buffer for non-MSRT is supported only for RT formats 32bpp,
 *       64bpp, and 128bpp.
 *
 * From the Skylake documentation, it is made clear that X-tiling is no longer
 * supported:
 *
 *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
 *       non-MSRTs only.
 */
static bool
intel_miptree_supports_ccs(struct brw_context *brw,
                           const struct intel_mipmap_tree *mt)
{
   /* MCS support does not exist prior to Gen7 */
   if (brw->gen < 7)
      return false;

   /* This function applies only to non-multisampled render targets. */
   if (mt->surf.samples > 1)
      return false;

   /* MCS is only supported for color buffers */
   switch (_mesa_get_format_base_format(mt->format)) {
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL:
   case GL_STENCIL_INDEX:
      return false;
   }

   if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
      return false;

   const bool mip_mapped = mt->first_level != 0 || mt->last_level != 0;
   const bool arrayed = mt->surf.logical_level0_px.array_len > 1 ||
                        mt->surf.logical_level0_px.depth > 1;

   if (arrayed) {
      /* Multisample surfaces with the CMS layout are not layered surfaces,
       * yet still have physical_depth0 > 1. Assert that we don't
       * accidentally reject a multisampled surface here. We should have
       * rejected it earlier by explicitly checking the sample count.
       */
      assert(mt->surf.samples == 1);
   }

   /* Handle the hardware restrictions...
    *
    * All GENs have the following restriction: "MCS buffer for non-MSRT is
    * supported only for RT formats 32bpp, 64bpp, and 128bpp."
    *
    * From the HSW PRM Volume 7: 3D-Media-GPGPU, page 652: (Color Clear of
    * Non-MultiSampler Render Target Restrictions) Support is for
    * non-mip-mapped and non-array surface types only.
    *
    * From the BDW PRM Volume 7: 3D-Media-GPGPU, page 649: (Color Clear of
    * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed
    * surfaces are supported with MCS buffer layout with these alignments in
    * the RT space: Horizontal Alignment = 256 and Vertical Alignment = 128.
    *
    * From the SKL PRM Volume 7: 3D-Media-GPGPU, page 632: (Color Clear of
    * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed
    * surfaces are supported with MCS buffer layout with these alignments in
    * the RT space: Horizontal Alignment = 128 and Vertical Alignment = 64.
    */
   if (brw->gen < 8 && (mip_mapped || arrayed))
      return false;

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->mesa_format_supports_render[mt->format])
      return false;

   return true;
}
static bool
intel_tiling_supports_hiz(const struct brw_context *brw,
                          enum isl_tiling tiling)
{
   if (brw->gen < 6)
      return false;

   return tiling == ISL_TILING_Y0;
}
static bool
intel_miptree_supports_hiz(const struct brw_context *brw,
                           const struct intel_mipmap_tree *mt)
{
   if (!brw->has_hiz)
      return false;

   switch (mt->format) {
   case MESA_FORMAT_Z_FLOAT32:
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
   case MESA_FORMAT_Z_UNORM16:
      return true;
   default:
      return false;
   }
}
static bool
intel_miptree_supports_ccs_e(struct brw_context *brw,
                             const struct intel_mipmap_tree *mt)
{
   if (brw->gen < 9)
      return false;

   /* For now compression is only enabled for integer formats even though
    * supported floating-point formats also exist. This is a heuristic
    * decision based on current public benchmarks: in none of the measured
    * cases did these formats provide any improvement, and a few cases were
    * seen to regress. Hence they are left to be enabled in the future when
    * they are known to improve things.
    */
   if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
      return false;

   if (!intel_miptree_supports_ccs(brw, mt))
      return false;

   /* Many window system buffers are sRGB even if they are never rendered as
    * sRGB. For those, we want CCS_E for when sRGBEncode is false. When the
    * surface is used as sRGB, we fall back to CCS_D.
    */
   mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
   enum isl_format isl_format = brw_isl_format_for_mesa_format(linear_format);
   return isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format);
}
/**
 * Determine the depth format corresponding to a depth+stencil format,
 * for separate stencil.
 */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format)
{
   switch (format) {
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return MESA_FORMAT_Z_FLOAT32;
   default:
      return format;
   }
}
static bool
create_mapping_table(GLenum target, unsigned first_level, unsigned last_level,
                     unsigned depth0, struct intel_mipmap_level *table)
{
   for (unsigned level = first_level; level <= last_level; level++) {
      const unsigned d =
         target == GL_TEXTURE_3D ? minify(depth0, level) : depth0;

      table[level].slice = calloc(d, sizeof(*table[0].slice));
      if (!table[level].slice)
         goto unwind;
   }

   return true;

unwind:
   for (unsigned level = first_level; level <= last_level; level++)
      free(table[level].slice);

   return false;
}
static bool
needs_separate_stencil(const struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       mesa_format format, uint32_t layout_flags)
{
   if (layout_flags & MIPTREE_LAYOUT_FOR_BO)
      return false;

   if (_mesa_get_format_base_format(format) != GL_DEPTH_STENCIL)
      return false;

   if (brw->must_use_separate_stencil)
      return true;

   return brw->has_separate_stencil &&
          intel_miptree_supports_hiz(brw, mt);
}
/**
 * Choose the aux usage for this miptree. This function must be called fairly
 * late in the miptree create process after we have a tiling.
 */
static void
intel_miptree_choose_aux_usage(struct brw_context *brw,
                               struct intel_mipmap_tree *mt)
{
   assert(mt->aux_usage == ISL_AUX_USAGE_NONE);

   if (mt->surf.samples > 1 && is_mcs_supported(brw, mt->format)) {
      assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
      mt->aux_usage = ISL_AUX_USAGE_MCS;
   } else if (intel_tiling_supports_ccs(brw, mt->surf.tiling) &&
              intel_miptree_supports_ccs(brw, mt)) {
      if (!unlikely(INTEL_DEBUG & DEBUG_NO_RBC) &&
          intel_miptree_supports_ccs_e(brw, mt)) {
         mt->aux_usage = ISL_AUX_USAGE_CCS_E;
      } else {
         mt->aux_usage = ISL_AUX_USAGE_CCS_D;
      }
   } else if (intel_tiling_supports_hiz(brw, mt->surf.tiling) &&
              intel_miptree_supports_hiz(brw, mt)) {
      mt->aux_usage = ISL_AUX_USAGE_HIZ;
   }

   /* We can do fast-clear on all auxiliary surface types that are
    * allocated through the normal texture creation paths.
    */
   if (mt->aux_usage != ISL_AUX_USAGE_NONE)
      mt->supports_fast_clear = true;
}
/**
 * Choose an appropriate uncompressed format for a requested
 * compressed format, if unsupported.
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
{
   /* No need to lower ETC formats on these platforms,
    * they are supported natively.
    */
   if (brw->gen >= 8 || brw->is_baytrail)
      return format;

   switch (format) {
   case MESA_FORMAT_ETC1_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_RGB8:
      return MESA_FORMAT_R8G8B8X8_UNORM;
   case MESA_FORMAT_ETC2_SRGB8:
   case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
   case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_B8G8R8A8_SRGB;
   case MESA_FORMAT_ETC2_RGBA8_EAC:
   case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
      return MESA_FORMAT_R8G8B8A8_UNORM;
   case MESA_FORMAT_ETC2_R11_EAC:
      return MESA_FORMAT_R_UNORM16;
   case MESA_FORMAT_ETC2_SIGNED_R11_EAC:
      return MESA_FORMAT_R_SNORM16;
   case MESA_FORMAT_ETC2_RG11_EAC:
      return MESA_FORMAT_R16G16_UNORM;
   case MESA_FORMAT_ETC2_SIGNED_RG11_EAC:
      return MESA_FORMAT_R16G16_SNORM;
   default:
      /* Non-ETC1/ETC2 format */
      return format;
   }
}
unsigned
brw_get_num_logical_layers(const struct intel_mipmap_tree *mt, unsigned level)
{
   if (mt->surf.dim == ISL_SURF_DIM_3D)
      return minify(mt->surf.logical_level0_px.depth, level);
   else
      return mt->surf.logical_level0_px.array_len;
}
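/* Note (illustrative, not from the original file): logical layers above
 * count what the GL API sees, while the physical layers computed below also
 * account for layout quirks such as Gen4-style 3D mipmap packing. For
 * example, a GL_TEXTURE_3D miptree with a 64x64x64 level 0 has
 * minify(64, 2) == 16 logical layers at level 2, whereas a 2D array texture
 * keeps its array_len at every level.
 */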
static unsigned
get_num_phys_layers(const struct isl_surf *surf, unsigned level)
{
   /* For physical dimensions one also needs to consider the layout.
    * See isl_calc_phys_level0_extent_sa().
    */
   if (surf->dim != ISL_SURF_DIM_3D)
      return surf->phys_level0_sa.array_len;

   if (surf->dim_layout == ISL_DIM_LAYOUT_GEN4_2D)
      return minify(surf->phys_level0_sa.array_len, level);

   return minify(surf->phys_level0_sa.depth, level);
}
/** \brief Assert that the level and layer are valid for the miptree. */
void
intel_miptree_check_level_layer(const struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   assert(level >= mt->first_level);
   assert(level <= mt->last_level);
   assert(layer < get_num_phys_layers(&mt->surf, level));
}
static enum isl_aux_state **
create_aux_state_map(struct intel_mipmap_tree *mt,
                     enum isl_aux_state initial)
{
   const uint32_t levels = mt->last_level + 1;

   uint32_t total_slices = 0;
   for (uint32_t level = 0; level < levels; level++)
      total_slices += brw_get_num_logical_layers(mt, level);

   const size_t per_level_array_size = levels * sizeof(enum isl_aux_state *);

   /* We're going to allocate a single chunk of data for both the per-level
    * reference array and the arrays of aux_state. This makes cleanup
    * significantly easier.
    */
   const size_t total_size = per_level_array_size +
                             total_slices * sizeof(enum isl_aux_state);
   void *data = malloc(total_size);
   if (data == NULL)
      return NULL;

   enum isl_aux_state **per_level_arr = data;
   enum isl_aux_state *s = data + per_level_array_size;
   for (uint32_t level = 0; level < levels; level++) {
      per_level_arr[level] = s;
      const unsigned level_layers = brw_get_num_logical_layers(mt, level);
      for (uint32_t a = 0; a < level_layers; a++)
         *(s++) = initial;
   }
   assert((void *)s == data + total_size);

   return per_level_arr;
}
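/* A rough sketch (not normative) of the single allocation built above:
 *
 *    [ levels pointers ][ level 0 states... ][ level 1 states... ] ...
 *    ^ per_level_arr     ^ per_level_arr[0]   ^ per_level_arr[1]
 *
 * Because everything lives in one malloc'd chunk, free_aux_state_map()
 * below only has to free() the one pointer.
 */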
static void
free_aux_state_map(enum isl_aux_state **state)
{
   free(state);
}
static bool
need_to_retile_as_linear(struct brw_context *brw, unsigned row_pitch,
                         enum isl_tiling tiling, unsigned samples)
{
   if (samples > 1)
      return false;

   if (tiling == ISL_TILING_LINEAR)
      return false;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (row_pitch < 64)
      return true;

   if (ALIGN(row_pitch, 512) >= 32768) {
      perf_debug("row pitch %u too large to blit, falling back to untiled",
                 row_pitch);
      return true;
   }

   return false;
}
static bool
need_to_retile_as_x(const struct brw_context *brw, uint64_t size,
                    enum isl_tiling tiling)
{
   /* If the BO is too large to fit in the aperture, we need to use the
    * BLT engine to support it. Prior to Sandybridge, the BLT paths can't
    * handle Y-tiling, so we need to fall back to X.
    */
   if (brw->gen < 6 && size >= brw->max_gtt_map_object_size &&
       tiling == ISL_TILING_Y0)
      return true;

   return false;
}
static struct intel_mipmap_tree *
make_surface(struct brw_context *brw, GLenum target, mesa_format format,
             unsigned first_level, unsigned last_level,
             unsigned width0, unsigned height0, unsigned depth0,
             unsigned num_samples, isl_tiling_flags_t tiling_flags,
             isl_surf_usage_flags_t isl_usage_flags, uint32_t alloc_flags,
             unsigned row_pitch, struct brw_bo *bo)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   if (!create_mapping_table(target, first_level, last_level, depth0,
                             mt->level)) {
      free(mt);
      return NULL;
   }

   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP ||
       target == GL_TEXTURE_CUBE_MAP_ARRAY)
      isl_usage_flags |= ISL_SURF_USAGE_CUBE_BIT;

   DBG("%s: %s %s %ux %u:%u:%u %d..%d <-- %p\n",
       __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       num_samples, width0, height0, depth0,
       first_level, last_level, mt);

   struct isl_surf_init_info init_info = {
      .dim = get_isl_surf_dim(target),
      .format = translate_tex_format(brw, format, false),
      .width = width0,
      .height = height0,
      .depth = target == GL_TEXTURE_3D ? depth0 : 1,
      .levels = last_level - first_level + 1,
      .array_len = target == GL_TEXTURE_3D ? 1 : depth0,
      .samples = num_samples,
      .row_pitch = row_pitch,
      .usage = isl_usage_flags,
      .tiling_flags = tiling_flags,
   };

   if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
      goto fail;

   /* In case the caller doesn't specifically request Y-tiling (needed
    * unconditionally for depth), check for corner cases needing special
    * treatment.
    */
   if (tiling_flags & ~ISL_TILING_Y0_BIT) {
      if (need_to_retile_as_linear(brw, mt->surf.row_pitch,
                                   mt->surf.tiling, mt->surf.samples)) {
         init_info.tiling_flags = 1u << ISL_TILING_LINEAR;
         if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
            goto fail;
      } else if (need_to_retile_as_x(brw, mt->surf.size, mt->surf.tiling)) {
         init_info.tiling_flags = 1u << ISL_TILING_X;
         if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
            goto fail;
      }
   }

   /* In the linear case the buffer gets padded by a fixed 64 bytes and
    * therefore the size may not be a multiple of row_pitch.
    * See isl_apply_surface_padding().
    */
   if (mt->surf.tiling != ISL_TILING_LINEAR)
      assert(mt->surf.size % mt->surf.row_pitch == 0);

   if (!bo) {
      mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "isl-miptree",
                                  mt->surf.size,
                                  isl_tiling_to_i915_tiling(
                                     mt->surf.tiling),
                                  mt->surf.row_pitch, alloc_flags);
      if (!mt->bo)
         goto fail;
   } else {
      mt->bo = bo;
   }

   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->target = target;
   mt->format = format;
   mt->aux_state = NULL;
   mt->cpp = isl_format_get_layout(mt->surf.format)->bpb / 8;
   mt->compressed = _mesa_is_format_compressed(format);

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
static bool
make_separate_stencil_surface(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
{
   mt->stencil_mt = make_surface(brw, mt->target, MESA_FORMAT_S_UINT8,
                                 0, mt->surf.levels - 1,
                                 mt->surf.logical_level0_px.width,
                                 mt->surf.logical_level0_px.height,
                                 mt->surf.dim == ISL_SURF_DIM_3D ?
                                    mt->surf.logical_level0_px.depth :
                                    mt->surf.logical_level0_px.array_len,
                                 mt->surf.samples, ISL_TILING_W_BIT,
                                 ISL_SURF_USAGE_STENCIL_BIT |
                                 ISL_SURF_USAGE_TEXTURE_BIT,
                                 BO_ALLOC_FOR_RENDER, 0, NULL);
   if (!mt->stencil_mt)
      return false;

   mt->stencil_mt->r8stencil_needs_update = true;

   return true;
}
static bool
force_linear_tiling(uint32_t layout_flags)
{
   /* MIPTREE_LAYOUT_TILING_ANY includes both the NONE and Y bits. */
   if (layout_flags & MIPTREE_LAYOUT_TILING_Y)
      return false;

   return layout_flags & MIPTREE_LAYOUT_TILING_NONE;
}
static struct intel_mipmap_tree *
miptree_create(struct brw_context *brw,
               GLenum target,
               mesa_format format,
               GLuint first_level,
               GLuint last_level,
               GLuint width0,
               GLuint height0,
               GLuint depth0,
               GLuint num_samples,
               uint32_t layout_flags)
{
   if (format == MESA_FORMAT_S_UINT8)
      return make_surface(brw, target, format, first_level, last_level,
                          width0, height0, depth0, num_samples,
                          ISL_TILING_W_BIT,
                          ISL_SURF_USAGE_STENCIL_BIT |
                          ISL_SURF_USAGE_TEXTURE_BIT,
                          BO_ALLOC_FOR_RENDER, 0, NULL);

   const GLenum base_format = _mesa_get_format_base_format(format);
   if ((base_format == GL_DEPTH_COMPONENT ||
        base_format == GL_DEPTH_STENCIL) &&
       !force_linear_tiling(layout_flags)) {
      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil. Gen7 expects there to be no stencil bits in its depth
       * buffer.
       */
      const mesa_format depth_only_format =
         intel_depth_format_for_depthstencil_format(format);
      struct intel_mipmap_tree *mt = make_surface(
         brw, target, brw->gen >= 6 ? depth_only_format : format,
         first_level, last_level,
         width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT,
         ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
         BO_ALLOC_FOR_RENDER, 0, NULL);

      if (needs_separate_stencil(brw, mt, format, layout_flags) &&
          !make_separate_stencil_surface(brw, mt)) {
         intel_miptree_release(&mt);
         return NULL;
      }

      if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
         intel_miptree_choose_aux_usage(brw, mt);

      return mt;
   }

   mesa_format tex_format = format;
   mesa_format etc_format = MESA_FORMAT_NONE;
   uint32_t alloc_flags = 0;

   format = intel_lower_compressed_format(brw, format);

   etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

   assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
   if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
      alloc_flags |= BO_ALLOC_FOR_RENDER;

   isl_tiling_flags_t tiling_flags = force_linear_tiling(layout_flags) ?
      ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK;

   /* TODO: This used to be because there wasn't BLORP to handle Y-tiling. */
   if (brw->gen < 6)
      tiling_flags &= ~ISL_TILING_Y0_BIT;

   struct intel_mipmap_tree *mt = make_surface(
      brw, target, format,
      first_level, last_level,
      width0, height0, depth0,
      num_samples, tiling_flags,
      ISL_SURF_USAGE_RENDER_TARGET_BIT |
      ISL_SURF_USAGE_TEXTURE_BIT,
      alloc_flags, 0, NULL);
   if (!mt)
      return NULL;

   mt->etc_format = etc_format;

   if (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT)
      mt->bo->cache_coherent = false;

   if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
      intel_miptree_choose_aux_usage(brw, mt);

   return mt;
}
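/* Note (an explanatory aside, not from the original file): when an ETC
 * format is lowered above, mt->etc_format records the original compressed
 * format while mt->format holds the uncompressed storage format. The actual
 * decompression happens at map/upload time elsewhere in this file (the ETC
 * unpack helpers come from main/texcompress_etc.h, which is included above);
 * that mapping code falls outside this excerpt.
 */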
struct intel_mipmap_tree *
intel_miptree_create(struct brw_context *brw,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     GLuint num_samples,
                     uint32_t layout_flags)
{
   assert(num_samples > 0);

   struct intel_mipmap_tree *mt = miptree_create(
      brw, target, format,
      first_level, last_level,
      width0, height0, depth0, num_samples,
      layout_flags);
   if (!mt)
      return NULL;

   if (!intel_miptree_alloc_aux(brw, mt)) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            struct brw_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch,
                            uint32_t layout_flags)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling, swizzle;
   const GLenum target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
   const GLenum base_format = _mesa_get_format_base_format(format);

   if ((base_format == GL_DEPTH_COMPONENT ||
        base_format == GL_DEPTH_STENCIL)) {
      const mesa_format depth_only_format =
         intel_depth_format_for_depthstencil_format(format);
      mt = make_surface(brw, target,
                        brw->gen >= 6 ? depth_only_format : format,
                        0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
                        ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
                        BO_ALLOC_FOR_RENDER, pitch, bo);
      if (!mt)
         return NULL;

      brw_bo_reference(bo);

      if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
         intel_miptree_choose_aux_usage(brw, mt);

      return mt;
   } else if (format == MESA_FORMAT_S_UINT8) {
      mt = make_surface(brw, target, MESA_FORMAT_S_UINT8,
                        0, 0, width, height, depth, 1,
                        ISL_TILING_W_BIT,
                        ISL_SURF_USAGE_STENCIL_BIT |
                        ISL_SURF_USAGE_TEXTURE_BIT,
                        BO_ALLOC_FOR_RENDER, pitch, bo);
      if (!mt)
         return NULL;

      assert(bo->size >= mt->surf.size);

      brw_bo_reference(bo);
      return mt;
   }

   brw_bo_get_tiling(bo, &tiling, &swizzle);

   /* Nothing will be able to use this miptree with the BO if the offset
    * isn't aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* Miptrees can't handle negative pitch. If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   /* The BO already has a tiling format and we shouldn't confuse the lower
    * layers by making it try to find a tiling format again.
    */
   assert((layout_flags & MIPTREE_LAYOUT_TILING_ANY) == 0);
   assert((layout_flags & MIPTREE_LAYOUT_TILING_NONE) == 0);

   mt = make_surface(brw, target, format,
                     0, 0, width, height, depth, 1,
                     1lu << isl_tiling_from_i915_tiling(tiling),
                     ISL_SURF_USAGE_RENDER_TARGET_BIT |
                     ISL_SURF_USAGE_TEXTURE_BIT,
                     0, pitch, bo);
   if (!mt)
      return NULL;

   brw_bo_reference(bo);
   mt->offset = offset;

   if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
      intel_miptree_choose_aux_usage(brw, mt);

   return mt;
}
static struct intel_mipmap_tree *
miptree_create_for_planar_image(struct brw_context *brw,
                                __DRIimage *image, GLenum target)
{
   const struct intel_image_format *f = image->planar_format;
   struct intel_mipmap_tree *planar_mt = NULL;

   for (int i = 0; i < f->nplanes; i++) {
      const int index = f->planes[i].buffer_index;
      const uint32_t dri_format = f->planes[i].dri_format;
      const mesa_format format = driImageFormatToGLFormat(dri_format);
      const uint32_t width = image->width >> f->planes[i].width_shift;
      const uint32_t height = image->height >> f->planes[i].height_shift;

      /* Disable creation of the texture's aux buffers because the driver
       * exposes no EGL API to manage them. That is, there is no API for
       * resolving the aux buffer's content to the main buffer nor for
       * invalidating the aux buffer's content.
       */
      struct intel_mipmap_tree *mt =
         intel_miptree_create_for_bo(brw, image->bo, format,
                                     image->offsets[index],
                                     width, height, 1,
                                     image->strides[index],
                                     MIPTREE_LAYOUT_DISABLE_AUX);
      if (mt == NULL)
         return NULL;

      mt->target = target;

      if (i == 0)
         planar_mt = mt;
      else
         planar_mt->plane[i - 1] = mt;
   }

   return planar_mt;
}
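/* Worked example (illustrative only, not from the original file): for an
 * NV12 DRIimage, f->nplanes == 2; plane 0 is the full-resolution Y plane
 * and plane 1 the interleaved CbCr plane with width_shift == height_shift
 * == 1, so its miptree is created at half the width and height of the
 * image and hangs off planar_mt->plane[0].
 */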
struct intel_mipmap_tree *
intel_miptree_create_for_dri_image(struct brw_context *brw,
                                   __DRIimage *image, GLenum target,
                                   enum isl_colorspace colorspace,
                                   bool is_winsys_image)
{
   if (image->planar_format && image->planar_format->nplanes > 0) {
      assert(colorspace == ISL_COLORSPACE_NONE ||
             colorspace == ISL_COLORSPACE_YUV);
      return miptree_create_for_planar_image(brw, image, target);
   }

   mesa_format format = image->format;
   switch (colorspace) {
   case ISL_COLORSPACE_NONE:
      /* Keep the image format unmodified */
      break;

   case ISL_COLORSPACE_LINEAR:
      format = _mesa_get_srgb_format_linear(format);
      break;

   case ISL_COLORSPACE_SRGB:
      format = _mesa_get_linear_format_srgb(format);
      break;

   default:
      unreachable("Invalid colorspace for non-planar image");
   }

   if (!brw->ctx.TextureFormatSupported[format]) {
      /* The texture storage paths in core Mesa detect if the driver does not
       * support the user-requested format, and then search for a fallback
       * format. The DRIimage code bypasses core Mesa, though, so we do the
       * fallbacks here for important formats.
       *
       * We must support DRM_FOURCC_XBGR8888 textures because the Android
       * framework produces HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces, which
       * the Chrome OS compositor consumes as dma_buf EGLImages.
       */
      format = _mesa_format_fallback_rgbx_to_rgba(format);
   }

   if (!brw->ctx.TextureFormatSupported[format])
      return NULL;

   /* If this image comes in from a window system, we have different
    * requirements than if it comes in via an EGL import operation. Window
    * system images can use any form of auxiliary compression we wish because
    * they get "flushed" before being handed off to the window system and we
    * have the opportunity to do resolves. Window system buffers also may be
    * used for scanout so we need to flag that appropriately.
    *
    * For EGL-imported images, on the other hand, disable creation of the
    * texture's aux buffers because the driver exposes no EGL API to manage
    * them. That is, there is no API for resolving the aux buffer's content
    * to the main buffer nor for invalidating the aux buffer's content.
    */
   const uint32_t mt_layout_flags =
      is_winsys_image ? MIPTREE_LAYOUT_FOR_SCANOUT : MIPTREE_LAYOUT_DISABLE_AUX;

   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw, image->bo, format,
                                  image->offset, image->width, image->height, 1,
                                  image->pitch, mt_layout_flags);
   if (mt == NULL)
      return NULL;

   mt->target = target;
   mt->level[0].level_x = image->tile_x;
   mt->level[0].level_y = image->tile_y;

   /* From "OES_EGL_image" error reporting: we report GL_INVALID_OPERATION
    * for EGL images from non-tile-aligned surfaces on gen4 and earlier
    * hardware, which has trouble resolving back to the destination image
    * due to alignment issues.
    */
   if (!brw->has_surface_tile_offset) {
      uint32_t draw_x, draw_y;
      intel_miptree_get_tile_offsets(mt, 0, 0, &draw_x, &draw_y);

      if (draw_x != 0 || draw_y != 0) {
         _mesa_error(&brw->ctx, GL_INVALID_OPERATION, __func__);
         intel_miptree_release(&mt);
         return NULL;
      }
   }

   if (!intel_miptree_alloc_aux(brw, mt)) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
/**
 * For a singlesample renderbuffer, this simply wraps the given BO with a
 * miptree.
 *
 * For a multisample renderbuffer, this wraps the window system's
 * (singlesample) BO with a singlesample miptree attached to the
 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt
 * that will contain the actual rendering (which is lazily resolved to
 * irb->singlesample_mt).
 */
bool
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         struct intel_mipmap_tree *singlesample_mt,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
{
   struct intel_mipmap_tree *multisample_mt = NULL;
   struct gl_renderbuffer *rb = &irb->Base.Base;
   mesa_format format = rb->Format;
   const unsigned num_samples = MAX2(rb->NumSamples, 1);

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   assert(singlesample_mt);

   if (num_samples == 1) {
      intel_miptree_release(&irb->mt);
      irb->mt = singlesample_mt;

      assert(!irb->singlesample_mt);
   } else {
      intel_miptree_release(&irb->singlesample_mt);
      irb->singlesample_mt = singlesample_mt;

      if (!irb->mt ||
          irb->mt->surf.logical_level0_px.width != width ||
          irb->mt->surf.logical_level0_px.height != height) {
         multisample_mt = intel_miptree_create_for_renderbuffer(intel,
                                                                format,
                                                                width,
                                                                height,
                                                                num_samples);
         if (!multisample_mt)
            goto fail;

         irb->need_downsample = false;
         intel_miptree_release(&irb->mt);
         irb->mt = multisample_mt;
      }
   }
   return true;

fail:
   intel_miptree_release(&irb->mt);
   return false;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   const uint32_t depth = 1;
   GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
   const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                                 MIPTREE_LAYOUT_TILING_ANY;

   mt = intel_miptree_create(brw, target, format, 0, 0,
                             width, height, depth, num_samples,
                             layout_flags);
   if (!mt)
      goto fail;

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}
static void
intel_miptree_aux_buffer_free(struct intel_miptree_aux_buffer *aux_buf)
{
   if (aux_buf == NULL)
      return;

   brw_bo_unreference(aux_buf->bo);

   free(aux_buf);
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      brw_bo_unreference((*mt)->bo);
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->r8stencil_mt);
      intel_miptree_aux_buffer_free((*mt)->hiz_buf);
      intel_miptree_aux_buffer_free((*mt)->mcs_buf);
      free_aux_state_map((*mt)->aux_state);

      intel_miptree_release(&(*mt)->plane[0]);
      intel_miptree_release(&(*mt)->plane[1]);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
void
intel_get_image_dims(struct gl_texture_image *image,
                     int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      /* For a 1D Array texture the OpenGL API will treat the image height as
       * the number of array slices. For Intel hardware, we treat the 1D array
       * as a 2D Array with a height of 1. So, here we want to swap image
       * height and depth.
       */
      assert(image->Depth == 1);
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   case GL_TEXTURE_CUBE_MAP:
      /* For Cube maps, the mesa/main api layer gives us a depth of 1 even
       * though we really have 6 slices.
       */
      assert(image->Depth == 1);
      *width = image->Width;
      *height = image->Height;
      *depth = 6;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
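/* For instance (illustrative only): a GL_TEXTURE_1D_ARRAY image with
 * Width == 128 and Height == 5 comes back as width 128, height 1, depth 5,
 * matching how the hardware lays out 1D arrays as 2D arrays of height 1.
 */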
/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in,
    * and objects can't change targets over their lifetimes, so this should
    * be true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_get_image_dims(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   if (level >= mt->surf.levels)
      return false;

   const unsigned level_depth =
      mt->surf.dim == ISL_SURF_DIM_3D ?
         minify(mt->surf.logical_level0_px.depth, level) :
         mt->surf.logical_level0_px.array_len;

   return width == minify(mt->surf.logical_level0_px.width, level) &&
          height == minify(mt->surf.logical_level0_px.height, level) &&
          depth == level_depth &&
          MAX2(image->NumSamples, 1) == mt->surf.samples;
}
void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   if (level == 0 && slice == 0) {
      *x = mt->level[0].level_x;
      *y = mt->level[0].level_y;
      return;
   }

   uint32_t x_offset_sa, y_offset_sa;

   /* The miptree itself can have an offset only if it represents a single
    * slice in an imported buffer object.
    * See intel_miptree_create_for_dri_image().
    */
   assert(mt->level[0].level_x == 0);
   assert(mt->level[0].level_y == 0);

   /* The given level is relative to level zero while the miptree may
    * represent just a subset of all levels starting from 'first_level'.
    */
   assert(level >= mt->first_level);
   level -= mt->first_level;

   const unsigned z = mt->surf.dim == ISL_SURF_DIM_3D ? slice : 0;
   slice = mt->surf.dim == ISL_SURF_DIM_3D ? 0 : slice;
   isl_surf_get_image_offset_el(&mt->surf, level, slice, z,
                                &x_offset_sa, &y_offset_sa);

   *x = x_offset_sa;
   *y = y_offset_sa;
}
/**
 * This function computes the tile_w (in bytes) and tile_h (in rows) of
 * different tiling patterns. If the BO is untiled, tile_w is set to cpp
 * and tile_h is set to 1.
 */
void
intel_get_tile_dims(enum isl_tiling tiling, uint32_t cpp,
                    uint32_t *tile_w, uint32_t *tile_h)
{
   switch (tiling) {
   case ISL_TILING_X:
      *tile_w = 512;
      *tile_h = 8;
      break;
   case ISL_TILING_Y0:
      *tile_w = 128;
      *tile_h = 32;
      break;
   case ISL_TILING_LINEAR:
      *tile_w = cpp;
      *tile_h = 1;
      break;
   default:
      unreachable("not reached");
   }
}

/**
 * This function computes masks that may be used to select the bits of the X
 * and Y coordinates that indicate the offset within a tile. If the BO is
 * untiled, the masks are set to 0.
 */
void
intel_get_tile_masks(enum isl_tiling tiling, uint32_t cpp,
                     uint32_t *mask_x, uint32_t *mask_y)
{
   uint32_t tile_w_bytes, tile_h;

   intel_get_tile_dims(tiling, cpp, &tile_w_bytes, &tile_h);

   *mask_x = tile_w_bytes / cpp - 1;
   *mask_y = tile_h - 1;
}
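/* Worked example (illustrative only): for an X-tiled surface with cpp == 4,
 * intel_get_tile_dims() yields tile_w == 512 bytes and tile_h == 8 rows, so
 * mask_x == 512 / 4 - 1 == 127 and mask_y == 7; (x & mask_x, y & mask_y)
 * are then the intra-tile pixel coordinates.
 */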
/**
 * Compute the offset (in bytes) from the start of the BO to the given x
 * and y coordinate. For tiled BOs, the caller must ensure that x and y are
 * multiples of the tile size.
 */
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y)
{
   int cpp = mt->cpp;
   uint32_t pitch = mt->surf.row_pitch;

   switch (mt->surf.tiling) {
   default:
      unreachable("not reached");
   case ISL_TILING_LINEAR:
      return y * pitch + x * cpp;
   case ISL_TILING_X:
      assert((x % (512 / cpp)) == 0);
      assert((y % 8) == 0);
      return y * pitch + x / (512 / cpp) * 4096;
   case ISL_TILING_Y0:
      assert((x % (128 / cpp)) == 0);
      assert((y % 32) == 0);
      return y * pitch + x / (128 / cpp) * 4096;
   }
}
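/* Worked example (illustrative only): an X-tiled miptree with cpp == 4 and
 * row_pitch == 4096 maps (x == 256, y == 8) to 8 * 4096 + 256 / 128 * 4096
 * == 40960 bytes: the y term advances eight pixel rows, and the x term skips
 * two 4 KB tiles across, since each X tile covers 512 / 4 == 128 pixels.
 */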
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary. For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_get_tile_masks(mt->surf.tiling, mt->cpp, &mask_x, &mask_y);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y);
}
static void
intel_miptree_copy_slice_sw(struct brw_context *brw,
                            struct intel_mipmap_tree *src_mt,
                            unsigned src_level, unsigned src_layer,
                            struct intel_mipmap_tree *dst_mt,
                            unsigned dst_level, unsigned dst_layer,
                            unsigned width, unsigned height)
{
   void *src, *dst;
   ptrdiff_t src_stride, dst_stride;
   const unsigned cpp = (isl_format_get_layout(dst_mt->surf.format)->bpb / 8);

   intel_miptree_map(brw, src_mt,
                     src_level, src_layer,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                     &src, &src_stride);

   intel_miptree_map(brw, dst_mt,
                     dst_level, dst_layer,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
                     BRW_MAP_DIRECT_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(brw, dst_mt, dst_level, dst_layer);
   intel_miptree_unmap(brw, src_mt, src_level, src_layer);

   /* Don't forget to copy the stencil data over, too. We could have skipped
    * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
    * shuffling the two data sources in/out of temporary storage instead of
    * the direct mapping we get this way.
    */
   if (dst_mt->stencil_mt) {
      assert(src_mt->stencil_mt);
      intel_miptree_copy_slice_sw(brw,
                                  src_mt->stencil_mt, src_level, src_layer,
                                  dst_mt->stencil_mt, dst_level, dst_layer,
                                  width, height);
   }
}
void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         unsigned src_level, unsigned src_layer,
                         struct intel_mipmap_tree *dst_mt,
                         unsigned dst_level, unsigned dst_layer)
{
   mesa_format format = src_mt->format;
   unsigned width = minify(src_mt->surf.phys_level0_sa.width,
                           src_level - src_mt->first_level);
   unsigned height = minify(src_mt->surf.phys_level0_sa.height,
                            src_level - src_mt->first_level);

   assert(src_layer < get_num_phys_layers(&src_mt->surf,
                                          src_level - src_mt->first_level));

   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      unsigned int i, j;
      _mesa_get_format_block_size(dst_mt->format, &i, &j);
      height = ALIGN_NPOT(height, j) / j;
      width = ALIGN_NPOT(width, i) / i;
   }

   /* If it's a packed depth/stencil buffer with separate stencil, the blit
    * below won't apply since we can't do the depth's Y tiling or the
    * stencil's W tiling in the blitter.
    */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice_sw(brw,
                                  src_mt, src_level, src_layer,
                                  dst_mt, dst_level, dst_layer,
                                  width, height);
      return;
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_layer,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, src_level, src_layer,
                                  &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->surf.row_pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->surf.row_pitch,
       width, height);

   if (!intel_miptree_blit(brw,
                           src_mt, src_level, src_layer, 0, 0, false,
                           dst_mt, dst_level, dst_layer, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(brw,
                                  src_mt, src_level, src_layer,
                                  dst_mt, dst_level, dst_layer,
                                  width, height);
   }
}
/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree
 * (this is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   const unsigned face = intelImage->base.Base.Face;
   unsigned start_layer, end_layer;

   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY) {
      assert(face == 0);
      assert(intelImage->base.Base.Height);
      start_layer = 0;
      end_layer = intelImage->base.Base.Height - 1;
   } else if (face > 0) {
      start_layer = face;
      end_layer = face;
   } else {
      assert(intelImage->base.Base.Depth);
      start_layer = 0;
      end_layer = intelImage->base.Base.Depth - 1;
   }

   if (!invalidate) {
      for (unsigned i = start_layer; i <= end_layer; i++) {
         intel_miptree_copy_slice(brw,
                                  src_mt, level, i,
                                  dst_mt, level, i);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}
static void
intel_miptree_init_mcs(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       int init_value)
{
   assert(mt->mcs_buf != NULL);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that
    *     it is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE);
   if (unlikely(map == NULL)) {
      fprintf(stderr, "Failed to map mcs buffer into GTT\n");
      brw_bo_unreference(mt->mcs_buf->bo);
      free(mt->mcs_buf);
      return;
   }
   void *data = map;
   memset(data, init_value, mt->mcs_buf->size);
   brw_bo_unmap(mt->mcs_buf->bo);
}
static struct intel_miptree_aux_buffer *
intel_alloc_aux_buffer(struct brw_context *brw,
                       const char *name,
                       const struct isl_surf *aux_surf,
                       uint32_t alloc_flags,
                       struct intel_mipmap_tree *mt)
{
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
   if (!buf)
      return NULL;

   buf->size = aux_surf->size;
   buf->pitch = aux_surf->row_pitch;
   buf->qpitch = isl_surf_get_array_pitch_sa_rows(aux_surf);

   /* ISL has a stricter set of alignment rules than the drm allocator.
    * Therefore one can pass the ISL dimensions in terms of bytes instead of
    * trying to recalculate based on different format block sizes.
    */
   buf->bo = brw_bo_alloc_tiled(brw->bufmgr, name, buf->size,
                                I915_TILING_Y, buf->pitch, alloc_flags);
   if (!buf->bo) {
      free(buf);
      return NULL;
   }

   buf->surf = *aux_surf;

   return buf;
}
static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_buf == NULL);
   assert(mt->aux_usage == ISL_AUX_USAGE_MCS);

   /* Multisampled miptrees are only supported with a single level. */
   assert(mt->first_level == 0);
   enum isl_aux_state **aux_state =
      create_aux_state_map(mt, ISL_AUX_STATE_CLEAR);
   if (!aux_state)
      return false;

   struct isl_surf temp_mcs_surf;

   MAYBE_UNUSED bool ok =
      isl_surf_get_mcs_surf(&brw->isl_dev, &mt->surf, &temp_mcs_surf);
   assert(ok);

   /* The buffer needs to be initialized, which requires it to be mapped
    * immediately into CPU space for writing. Therefore do not use the GPU
    * access flag, which can cause an unnecessary delay if the backing pages
    * happen to still be in use by the GPU.
    */
   const uint32_t alloc_flags = 0;
   mt->mcs_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
                                        &temp_mcs_surf, alloc_flags, mt);
   if (!mt->mcs_buf) {
      free(aux_state);
      return false;
   }

   mt->aux_state = aux_state;

   intel_miptree_init_mcs(brw, mt, 0xFF);

   return true;
}
static bool
intel_miptree_alloc_ccs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_buf == NULL);
   assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
          mt->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct isl_surf temp_ccs_surf;

   if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf, 0))
      return false;

   assert(temp_ccs_surf.size &&
          (temp_ccs_surf.size % temp_ccs_surf.row_pitch == 0));

   enum isl_aux_state **aux_state =
      create_aux_state_map(mt, ISL_AUX_STATE_PASS_THROUGH);
   if (!aux_state)
      return false;

   /* When CCS_E is used, we need to ensure that the CCS starts off in a
    * valid state. From the Sky Lake PRM, "MCS Buffer for Render Target(s)":
    *
    *    "If Software wants to enable Color Compression without Fast clear,
    *     Software needs to initialize MCS with zeros."
    *
    * A CCS value of 0 indicates that the corresponding block is in the
    * pass-through state, which is what we want.
    *
    * For CCS_D, on the other hand, we don't care as we're about to perform a
    * fast-clear operation. In that case, being hot in caches is more useful.
    */
   const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
                                BO_ALLOC_ZEROED : BO_ALLOC_FOR_RENDER;
   mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
                                        &temp_ccs_surf, alloc_flags, mt);
   if (!mt->mcs_buf) {
      free(aux_state);
      return false;
   }

   mt->aux_state = aux_state;

   return true;
}
/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_level_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level)
{
   assert(mt->hiz_buf);
   assert(mt->surf.size > 0);

   if (brw->gen >= 8 || brw->is_haswell) {
      uint32_t width = minify(mt->surf.phys_level0_sa.width, level);
      uint32_t height = minify(mt->surf.phys_level0_sa.height, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
       */
      if (level > 0 && ((width & 7) || (height & 3))) {
         DBG("mt %p level %d: HiZ DISABLED\n", mt, level);
         return false;
      }
   }

   DBG("mt %p level %d: HiZ enabled\n", mt, level);
   mt->level[level].has_hiz = true;
   return true;
}
static bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_buf == NULL);
   assert(mt->aux_usage == ISL_AUX_USAGE_HIZ);

   enum isl_aux_state **aux_state =
      create_aux_state_map(mt, ISL_AUX_STATE_AUX_INVALID);
   if (!aux_state)
      return false;

   struct isl_surf temp_hiz_surf;

   MAYBE_UNUSED bool ok =
      isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
   assert(ok);

   const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
   mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
                                        &temp_hiz_surf, alloc_flags, mt);
   if (!mt->hiz_buf) {
      free(aux_state);
      return false;
   }

   for (unsigned level = mt->first_level; level <= mt->last_level; ++level)
      intel_miptree_level_enable_hiz(brw, mt, level);

   mt->aux_state = aux_state;

   return true;
}
/**
 * Allocate the initial aux surface for a miptree based on mt->aux_usage.
 *
 * Since MCS, HiZ, and CCS_E can compress more than just clear color, we
 * create those auxiliary surfaces up-front. CCS_D, on the other hand, can
 * only compress clear color, so we wait until an actual fast-clear to
 * allocate it.
 */
static bool
intel_miptree_alloc_aux(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   switch (mt->aux_usage) {
   case ISL_AUX_USAGE_NONE:
      return true;

   case ISL_AUX_USAGE_HIZ:
      assert(!_mesa_is_format_color_format(mt->format));
      if (!intel_miptree_alloc_hiz(brw, mt))
         return false;
      return true;

   case ISL_AUX_USAGE_MCS:
      assert(_mesa_is_format_color_format(mt->format));
      assert(mt->surf.samples > 1);
      if (!intel_miptree_alloc_mcs(brw, mt, mt->surf.samples))
         return false;
      return true;

   case ISL_AUX_USAGE_CCS_D:
      /* Since CCS_D can only compress clear color, we wait until an actual
       * fast-clear to allocate it.
       */
      return true;

   case ISL_AUX_USAGE_CCS_E:
      assert(_mesa_is_format_color_format(mt->format));
      assert(mt->surf.samples == 1);
      if (!intel_miptree_alloc_ccs(brw, mt))
         return false;
      return true;
   }

   unreachable("Invalid aux usage");
}
/**
 * Can the miptree sample using the hiz buffer?
 */
bool
intel_miptree_sample_with_hiz(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
{
   /* It's unclear how well supported sampling from the hiz buffer is on
    * GEN8, so keep things conservative for now and never enable it unless
    * we're SKL+.
    */
   if (brw->gen < 9)
      return false;

   if (!mt->hiz_buf)
      return false;

   /* It seems the hardware won't fall back to the depth buffer if some of
    * the mipmap levels aren't available in the HiZ buffer. So we need all
    * levels of the texture to be HiZ enabled.
    */
   for (unsigned level = 0; level < mt->surf.levels; ++level) {
      if (!intel_miptree_level_has_hiz(mt, level))
         return false;
   }

   /* If compressed multisampling is enabled, then we use it for the
    * auxiliary buffer instead.
    *
    * From the BDW PRM (Volume 2d: Command Reference: Structures
    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient
    * evidence that this is broken on SKL+.
    */
   return (mt->surf.samples == 1 &&
           mt->target != GL_TEXTURE_3D &&
           mt->target != GL_TEXTURE_1D /* gen9+ restriction */);
}
/**
 * Does the miptree slice have hiz enabled?
 */
bool
intel_miptree_level_has_hiz(const struct intel_mipmap_tree *mt, uint32_t level)
{
   intel_miptree_check_level_layer(mt, level, 0);
   return mt->level[level].has_hiz;
}
static inline uint32_t
miptree_level_range_length(const struct intel_mipmap_tree *mt,
                           uint32_t start_level, uint32_t num_levels)
{
   assert(start_level >= mt->first_level);
   assert(start_level <= mt->last_level);

   if (num_levels == INTEL_REMAINING_LEVELS)
      num_levels = mt->last_level - start_level + 1;
   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= mt->last_level + 1);

   return num_levels;
}
static inline uint32_t
miptree_layer_range_length(const struct intel_mipmap_tree *mt, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
{
   assert(level <= mt->last_level);

   const uint32_t total_num_layers = brw_get_num_logical_layers(mt, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

   return num_layers;
}
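/* Usage note (illustrative only): callers typically pass
 * INTEL_REMAINING_LEVELS / INTEL_REMAINING_LAYERS to mean "everything from
 * the start point on". For example,
 *
 *    miptree_layer_range_length(mt, level, 0, INTEL_REMAINING_LAYERS)
 *
 * yields the full logical layer count for that level.
 */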
bool
intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt,
                                   unsigned start_level, unsigned num_levels,
                                   unsigned start_layer, unsigned num_layers)
{
   assert(_mesa_is_format_color_format(mt->format));

   if (!mt->mcs_buf)
      return false;

   /* Clamp the level range to fit the miptree */
   num_levels = miptree_level_range_length(mt, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(mt, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            intel_miptree_get_aux_state(mt, level, start_layer + a);
         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
            return true;
      }
   }

   return false;
}
static void
intel_miptree_check_color_resolve(const struct brw_context *brw,
                                  const struct intel_mipmap_tree *mt,
                                  unsigned level, unsigned layer)
{
   if (!mt->mcs_buf)
      return;

   /* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
   assert(brw->gen >= 8 ||
          (level == 0 && mt->first_level == 0 && mt->last_level == 0));

   /* Compression of arrayed msaa surfaces is supported. */
   if (mt->surf.samples > 1)
      return;

   /* Fast color clear is supported for non-msaa arrays only on Gen8+. */
   assert(brw->gen >= 8 ||
          (layer == 0 &&
           mt->surf.logical_level0_px.depth == 1 &&
           mt->surf.logical_level0_px.array_len == 1));

   (void)level;
   (void)layer;
}
static enum blorp_fast_clear_op
get_ccs_d_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);

   const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;

   assert(ccs_supported == fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (!ccs_supported)
         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      else
         return BLORP_FAST_CLEAR_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return BLORP_FAST_CLEAR_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break;
   }

   unreachable("Invalid aux state for CCS_D");
}
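/* Informal summary of the resolve-op helpers above and below (mirrors the
 * switches; the switches remain authoritative): a pending (partial) clear
 * that the consumer can't read gets a full resolve; compressed data read
 * with no aux or through plain CCS_D gets a full resolve; compressed-clear
 * data read through CCS_E without fast-clear support gets a partial resolve
 * (the clear color is folded in but compression is kept); everything else
 * needs no blorp op.
 */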
static enum blorp_fast_clear_op
get_ccs_e_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   if (aux_usage == ISL_AUX_USAGE_CCS_D)
      assert(fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (fast_clear_supported)
         return BLORP_FAST_CLEAR_OP_NONE;
      else if (aux_usage == ISL_AUX_USAGE_CCS_E)
         return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      else
         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      else if (!fast_clear_supported)
         return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      else
         return BLORP_FAST_CLEAR_OP_NONE;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      else
         return BLORP_FAST_CLEAR_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return BLORP_FAST_CLEAR_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
      break;
   }

   unreachable("Invalid aux state for CCS_E");
}
static void
intel_miptree_prepare_ccs_access(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);

   enum blorp_fast_clear_op resolve_op;
   if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
      resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   } else {
      assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
      resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   }

   if (resolve_op != BLORP_FAST_CLEAR_OP_NONE) {
      intel_miptree_check_color_resolve(brw, mt, level, layer);
      brw_blorp_resolve_color(brw, mt, level, layer, resolve_op);

      switch (resolve_op) {
      case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
         /* The CCS full resolve operation destroys the CCS and sets it to
          * the pass-through state. (You can also think of this as being
          * both a resolve and an ambiguate in one operation.)
          */
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      case BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL:
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         break;

      default:
         unreachable("Invalid resolve op");
      }
   }
}
static void
intel_miptree_finish_ccs_write(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);

   if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
                aux_usage == ISL_AUX_USAGE_CCS_D);

         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_CLEAR);
         } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_PARTIAL_CLEAR);
         }
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         }
         break;

      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_E");
      }
   } else {
      assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
      /* CCS_D is a bit simpler */
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_PARTIAL_CLEAR);
         break;

      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         /* Nothing to do */
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_D");
      }
   }
}
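/* MCS analog of the CCS prepare helper above.  Multisampled surfaces are
 * always compressed (the MCS is integral to the MSAA layout), so the only
 * resolve ever needed here is a partial one that eliminates the fast-clear
 * channel when the consumer cannot handle clear colors.
 */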
static void
intel_miptree_prepare_mcs_access(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (intel_miptree_get_aux_state(mt, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (!fast_clear_supported) {
         brw_blorp_mcs_partial_resolve(brw, mt, layer, 1);
         intel_miptree_set_aux_state(brw, mt, 0, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}
static void
intel_miptree_finish_mcs_write(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (intel_miptree_get_aux_state(mt, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
      intel_miptree_set_aux_state(brw, mt, 0, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}
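/* HiZ version of the prepare helper.  Depending on the tracked state this
 * either performs a depth resolve (making the depth surface valid on its
 * own) or a HiZ resolve (making the HiZ buffer valid again), then records
 * the transition.
 */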
static void
intel_miptree_prepare_hiz_access(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   enum blorp_hiz_op hiz_op = BLORP_HIZ_OP_NONE;
   switch (intel_miptree_get_aux_state(mt, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ)
         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_RESOLVED:
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      if (aux_usage == ISL_AUX_USAGE_HIZ)
         hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }

   if (hiz_op != BLORP_HIZ_OP_NONE) {
      intel_hiz_exec(brw, mt, level, layer, 1, hiz_op);

      switch (hiz_op) {
      case BLORP_HIZ_OP_DEPTH_RESOLVE:
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_RESOLVED);
         break;

      case BLORP_HIZ_OP_HIZ_RESOLVE:
         /* The HiZ resolve operation is actually an ambiguate */
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      default:
         unreachable("Invalid HiZ op");
      }
   }
}
static void
intel_miptree_finish_hiz_write(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   switch (intel_miptree_get_aux_state(mt, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      } else {
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_AUX_INVALID);
      }
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      assert(aux_usage != ISL_AUX_USAGE_HIZ);
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }
}
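/* Top-level prepare helper: fans out over the requested level/layer range
 * and dispatches to the per-slice MCS/CCS/HiZ helpers above.
 *
 * The expected calling pattern is to bracket every GPU access with a
 * prepare/finish pair.  An illustrative sketch only (the GPU work in the
 * middle is hypothetical):
 *
 *    intel_miptree_prepare_access(brw, mt, level, 1, layer, 1,
 *                                 aux_usage, fast_clear_supported);
 *    issue_gpu_write(brw, mt);   // hypothetical draw/blit touching mt
 *    intel_miptree_finish_write(brw, mt, level, layer, 1, aux_usage);
 */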
void
intel_miptree_prepare_access(struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
{
   num_levels = miptree_level_range_length(mt, start_level, num_levels);

   switch (mt->aux_usage) {
   case ISL_AUX_USAGE_NONE:
      /* Nothing to do */
      break;

   case ISL_AUX_USAGE_MCS:
      assert(mt->mcs_buf);
      assert(start_level == 0 && num_levels == 1);
      const uint32_t level_layers =
         miptree_layer_range_length(mt, 0, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         intel_miptree_prepare_mcs_access(brw, mt, start_layer + a,
                                          aux_usage, fast_clear_supported);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      if (!mt->mcs_buf)
         return;

      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         const uint32_t level_layers =
            miptree_layer_range_length(mt, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            intel_miptree_prepare_ccs_access(brw, mt, level,
                                             start_layer + a,
                                             aux_usage, fast_clear_supported);
         }
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      assert(mt->hiz_buf);
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         if (!intel_miptree_level_has_hiz(mt, level))
            continue;

         const uint32_t level_layers =
            miptree_layer_range_length(mt, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            intel_miptree_prepare_hiz_access(brw, mt, level, start_layer + a,
                                             aux_usage, fast_clear_supported);
         }
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}
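/* Bookkeeping counterpart to intel_miptree_prepare_access(): called after
 * a write to record the new aux state for every written layer of the given
 * level.  No GPU commands are emitted here.
 */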
void
intel_miptree_finish_write(struct brw_context *brw,
                           struct intel_mipmap_tree *mt, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
{
   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);

   switch (mt->aux_usage) {
   case ISL_AUX_USAGE_NONE:
      break;

   case ISL_AUX_USAGE_MCS:
      assert(mt->mcs_buf);
      for (uint32_t a = 0; a < num_layers; a++) {
         intel_miptree_finish_mcs_write(brw, mt, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      if (!mt->mcs_buf)
         return;

      for (uint32_t a = 0; a < num_layers; a++) {
         intel_miptree_finish_ccs_write(brw, mt, level, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      if (!intel_miptree_level_has_hiz(mt, level))
         return;

      for (uint32_t a = 0; a < num_layers; a++) {
         intel_miptree_finish_hiz_write(brw, mt, level, start_layer + a,
                                        aux_usage);
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}
enum isl_aux_state
intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
                            uint32_t level, uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (_mesa_is_format_color_format(mt->format)) {
      assert(mt->mcs_buf != NULL);
      assert(mt->surf.samples == 1 ||
             mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   } else if (mt->format == MESA_FORMAT_S_UINT8) {
      unreachable("Cannot get aux state for stencil");
   } else {
      assert(intel_miptree_level_has_hiz(mt, level));
   }

   return mt->aux_state[level][layer];
}
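/* Setter for the tracked aux state of a run of layers.  The same sanity
 * checks as the getter above apply: color surfaces must actually have an
 * aux buffer and depth surfaces must have HiZ for the level in question.
 */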
void
intel_miptree_set_aux_state(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);

   if (_mesa_is_format_color_format(mt->format)) {
      assert(mt->mcs_buf != NULL);
      assert(mt->surf.samples == 1 ||
             mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   } else if (mt->format == MESA_FORMAT_S_UINT8) {
      unreachable("Cannot set aux state for stencil");
   } else {
      assert(intel_miptree_level_has_hiz(mt, level));
   }

   for (unsigned a = 0; a < num_layers; a++)
      mt->aux_state[level][start_layer + a] = aux_state;
}
/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression).  There are, however, format restrictions and care needs to
 * be taken that the sampler engine is capable of re-interpreting a buffer
 * with a format different from the one the buffer was originally written
 * with.
 *
 * For example, SRGB formats are not compressible and the sampler engine
 * isn't capable of treating RGBA_UNORM as SRGB_ALPHA.  In such a case the
 * underlying color buffer needs to be resolved so that the sampling surface
 * can be sampled as non-compressed (i.e., without the auxiliary MCS buffer
 * being set).
 */
static bool
can_texture_with_ccs(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     enum isl_format view_format)
{
   if (mt->aux_usage != ISL_AUX_USAGE_CCS_E)
      return false;

   if (!isl_formats_are_ccs_e_compatible(&brw->screen->devinfo,
                                         mt->surf.format, view_format)) {
      perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
                 isl_format_get_layout(view_format)->name,
                 _mesa_get_format_name(mt->format));
      return false;
   }

   return true;
}
enum isl_aux_usage
intel_miptree_texture_aux_usage(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                enum isl_format view_format)
{
   switch (mt->aux_usage) {
   case ISL_AUX_USAGE_HIZ:
      if (intel_miptree_sample_with_hiz(brw, mt))
         return ISL_AUX_USAGE_HIZ;
      break;

   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      if (mt->mcs_buf && can_texture_with_ccs(brw, mt, view_format))
         return ISL_AUX_USAGE_CCS_E;
      break;

   default:
      break;
   }

   return ISL_AUX_USAGE_NONE;
}
static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gen9+, the hardware supports arbitrary clear values.  For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application.  This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
}
static void
intel_miptree_prepare_texture_slices(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt,
                                     enum isl_format view_format,
                                     uint32_t start_level, uint32_t num_levels,
                                     uint32_t start_layer, uint32_t num_layers,
                                     bool *aux_supported_out)
{
   enum isl_aux_usage aux_usage =
      intel_miptree_texture_aux_usage(brw, mt, view_format);
   bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;

   /* Clear color is specified as ints or floats and the conversion is done by
    * the sampler.  If we have a texture view, we would have to perform the
    * clear color conversion manually.  Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(mt->surf.format, view_format))
      clear_supported = false;

   intel_miptree_prepare_access(brw, mt, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);
   if (aux_supported_out)
      *aux_supported_out = aux_usage != ISL_AUX_USAGE_NONE;
}
void
intel_miptree_prepare_texture(struct brw_context *brw,
                              struct intel_mipmap_tree *mt,
                              enum isl_format view_format,
                              bool *aux_supported_out)
{
   intel_miptree_prepare_texture_slices(brw, mt, view_format,
                                        0, INTEL_REMAINING_LEVELS,
                                        0, INTEL_REMAINING_LAYERS,
                                        aux_supported_out);
}
void
intel_miptree_prepare_image(struct brw_context *brw,
                            struct intel_mipmap_tree *mt)
{
   /* The data port doesn't understand any compression */
   intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);
}
void
intel_miptree_prepare_fb_fetch(struct brw_context *brw,
                               struct intel_mipmap_tree *mt, uint32_t level,
                               uint32_t start_layer, uint32_t num_layers)
{
   intel_miptree_prepare_texture_slices(brw, mt, mt->surf.format, level, 1,
                                        start_layer, num_layers, NULL);
}
enum isl_aux_usage
intel_miptree_render_aux_usage(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               bool srgb_enabled, bool blend_enabled)
{
   switch (mt->aux_usage) {
   case ISL_AUX_USAGE_MCS:
      assert(mt->mcs_buf);
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
      return mt->mcs_buf ? ISL_AUX_USAGE_CCS_D : ISL_AUX_USAGE_NONE;

   case ISL_AUX_USAGE_CCS_E: {
      mesa_format mesa_format =
         srgb_enabled ? mt->format : _mesa_get_srgb_format_linear(mt->format);
      enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);

      /* If the format supports CCS_E, then we can just use it */
      if (isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format))
         return ISL_AUX_USAGE_CCS_E;

      /* Otherwise, we have to fall back to CCS_D */

      /* gen9 hardware technically supports non-0/1 clear colors with sRGB
       * formats.  However, there are issues with blending where it doesn't
       * properly apply the sRGB curve to the clear color when blending.
       */
      if (blend_enabled && isl_format_is_srgb(isl_format) &&
          !isl_color_value_is_zero_one(mt->fast_clear_color, isl_format))
         return ISL_AUX_USAGE_NONE;

      return ISL_AUX_USAGE_CCS_D;
   }

   default:
      return ISL_AUX_USAGE_NONE;
   }
}
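/* Render-target prepare: picks the aux usage via
 * intel_miptree_render_aux_usage() above and resolves accordingly.  Fast
 * clears are permitted whenever any aux usage survives the format checks.
 */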
void
intel_miptree_prepare_render(struct brw_context *brw,
                             struct intel_mipmap_tree *mt, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             bool srgb_enabled, bool blend_enabled)
{
   enum isl_aux_usage aux_usage =
      intel_miptree_render_aux_usage(brw, mt, srgb_enabled, blend_enabled);
   intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
                                aux_usage, aux_usage != ISL_AUX_USAGE_NONE);
}
void
intel_miptree_finish_render(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            bool srgb_enabled, bool blend_enabled)
{
   assert(_mesa_is_format_color_format(mt->format));

   enum isl_aux_usage aux_usage =
      intel_miptree_render_aux_usage(brw, mt, srgb_enabled, blend_enabled);
   intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
                              aux_usage);
}
void
intel_miptree_prepare_depth(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count)
{
   intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
                                mt->aux_usage, mt->hiz_buf != NULL);
}
void
intel_miptree_finish_depth(struct brw_context *brw,
                           struct intel_mipmap_tree *mt, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
                           bool depth_written)
{
   if (depth_written) {
      intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
                                 mt->hiz_buf != NULL);
   }
}
/**
 * Make it possible to share the BO backing the given miptree with another
 * process or another miptree.
 *
 * Fast color clears are unsafe with shared buffers, so we need to resolve and
 * then discard the MCS buffer, if present.  We also set the no_ccs flag to
 * ensure that no MCS buffer gets allocated in the future.
 *
 * HiZ is similarly unsafe with shared buffers.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt)
{
   /* MCS buffers are also used for multisample buffers, but we can't resolve
    * away a multisample MCS buffer because it's an integral part of how the
    * pixel data is stored.  Fortunately this code path should never be
    * reached for multisample buffers.
    */
   assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_NONE ||
          mt->surf.samples == 1);

   intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);

   if (mt->mcs_buf) {
      brw_bo_unreference(mt->mcs_buf->bo);
      free(mt->mcs_buf);
      mt->mcs_buf = NULL;

      /* Any pending MCS/CCS operations are no longer needed.  Trying to
       * execute any will likely crash due to the missing aux buffer.  So let's
       * delete all pending ops.
       */
      free(mt->aux_state);
      mt->aux_state = NULL;
   }

   if (mt->hiz_buf) {
      intel_miptree_aux_buffer_free(mt->hiz_buf);
      mt->hiz_buf = NULL;

      for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) {
         mt->level[l].has_hiz = false;
      }

      /* Any pending HiZ operations are no longer needed.  Trying to execute
       * any will likely crash due to the missing aux buffer.  So let's delete
       * all pending ops.
       */
      free(mt->aux_state);
      mt->aux_state = NULL;
   }

   mt->aux_usage = ISL_AUX_USAGE_NONE;
}
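/* A worked example of the W-tile address computation performed by
 * intel_offset_S8() below (values chosen for illustration only).  For
 * x = 17, y = 5, stride = 128, swizzled = false:
 *
 *    row_size = 64 * 128 / 2 = 4096
 *    tile_x = 0, tile_y = 0, byte_x = 17, byte_y = 5
 *    u = 512 * (17 / 8)        = 1024
 *      +  64 * (5 / 8)         =    0
 *      +  32 * ((5 / 4) % 2)   =   32
 *      +  16 * ((17 / 4) % 2)  =    0
 *      +   8 * ((5 / 2) % 2)   =    0
 *      +   4 * ((17 / 2) % 2)  =    0
 *      +   2 * (5 % 2)         =    2
 *      +   1 * (17 % 2)        =    1
 *    --> u = 1059
 *
 * In effect, the within-tile x and y bits interleave from high to low as
 * x5 x4 x3 y5 y4 y3 y2 x2 y1 x1 y0 x0.
 */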
/**
 * \brief Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride / 2; /* Two rows are interleaved. */

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst)
{
   unsigned src_w = src->surf.logical_level0_px.width;
   unsigned src_h = src->surf.logical_level0_px.height;
   unsigned dst_w = dst->surf.logical_level0_px.width;
   unsigned dst_h = dst->surf.logical_level0_px.height;

   brw_blorp_blit_miptrees(brw,
                           src, 0 /* level */, 0 /* layer */,
                           src->format, SWIZZLE_XYZW,
                           dst, 0 /* level */, 0 /* layer */, dst->format,
                           0, 0, src_w, src_h,
                           0, 0, dst_w, dst_h,
                           GL_NEAREST, false, false /*mirror x, y*/,
                           false, false);

   if (src->stencil_mt) {
      src_w = src->stencil_mt->surf.logical_level0_px.width;
      src_h = src->stencil_mt->surf.logical_level0_px.height;
      dst_w = dst->stencil_mt->surf.logical_level0_px.width;
      dst_h = dst->stencil_mt->surf.logical_level0_px.height;

      brw_blorp_blit_miptrees(brw,
                              src->stencil_mt, 0 /* level */, 0 /* layer */,
                              src->stencil_mt->format, SWIZZLE_XYZW,
                              dst->stencil_mt, 0 /* level */, 0 /* layer */,
                              dst->stencil_mt->format,
                              0, 0, src_w, src_h,
                              0, 0, dst_w, dst_h,
                              GL_NEAREST, false, false /*mirror x, y*/,
                              false, false /* decode/encode srgb */);
   }
}
void
intel_update_r8stencil(struct brw_context *brw,
                       struct intel_mipmap_tree *mt)
{
   assert(brw->gen >= 7);
   struct intel_mipmap_tree *src =
      mt->format == MESA_FORMAT_S_UINT8 ? mt : mt->stencil_mt;
   if (!src || brw->gen >= 8 || !src->r8stencil_needs_update)
      return;

   assert(src->surf.size > 0);

   if (!mt->r8stencil_mt) {
      assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_GEN6_HIZ_STENCIL */
      mt->r8stencil_mt = make_surface(
                            brw,
                            src->target,
                            MESA_FORMAT_R_UINT8,
                            src->first_level, src->last_level,
                            src->surf.logical_level0_px.width,
                            src->surf.logical_level0_px.height,
                            src->surf.dim == ISL_SURF_DIM_3D ?
                               src->surf.logical_level0_px.depth :
                               src->surf.logical_level0_px.array_len,
                            src->surf.samples,
                            ISL_TILING_Y0_BIT,
                            ISL_SURF_USAGE_TEXTURE_BIT,
                            BO_ALLOC_FOR_RENDER, 0, NULL);
      assert(mt->r8stencil_mt);
   }

   struct intel_mipmap_tree *dst = mt->r8stencil_mt;

   for (int level = src->first_level; level <= src->last_level; level++) {
      const unsigned depth = src->surf.dim == ISL_SURF_DIM_3D ?
         minify(src->surf.phys_level0_sa.depth, level) :
         src->surf.phys_level0_sa.array_len;

      for (unsigned layer = 0; layer < depth; layer++) {
         brw_blorp_copy_miptrees(brw,
                                 src, level, layer,
                                 dst, level, layer,
                                 0, 0, 0, 0,
                                 minify(src->surf.logical_level0_px.width,
                                        level),
                                 minify(src->surf.logical_level0_px.height,
                                        level));
      }
   }

   brw_render_cache_set_check_flush(brw, dst->bo);
   src->r8stencil_needs_update = false;
}
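/* Raw CPU mapping of the BO backing a miptree.  If the current batch still
 * references the BO, it is flushed first so the kernel does not have to
 * stall the mapping on our own unsubmitted rendering.
 */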
static void *
intel_miptree_map_raw(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      GLbitfield mode)
{
   struct brw_bo *bo = mt->bo;

   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

   return brw_bo_map(brw, bo, mode);
}

static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   brw_bo_unmap(mt->bo);
}
static void
intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   intptr_t x = map->x;
   intptr_t y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   assert(x % bw == 0);
   y /= bh;
   x /= bw;

   base = intel_miptree_map_raw(brw, mt, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->surf.row_pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) "
       "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}
static void
intel_miptree_map_blit(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format,
                                         /* first_level */ 0,
                                         /* last_level */ 0,
                                         map->w, map->h, 1,
                                         /* samples */ 1,
                                         MIPTREE_LAYOUT_TILING_NONE);

   if (!map->linear_mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->linear_mt->surf.row_pitch;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      if (!intel_miptree_copy(brw,
                              mt, level, slice, map->x, map->y,
                              map->linear_mt, 0, 0, 0, 0,
                              map->w, map->h)) {
         fprintf(stderr, "Failed to blit\n");
         goto fail;
      }
   }

   map->ptr = intel_miptree_map_raw(brw, map->linear_mt, map->mode);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->linear_mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level, unsigned int slice)
{
   struct gl_context *ctx = &brw->ctx;

   intel_miptree_unmap_raw(map->linear_mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_copy(brw,
                                   map->linear_mt, 0, 0, 0, 0,
                                   mt, level, slice, map->x, map->y,
                                   map->w, map->h);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->linear_mt);
}
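/* The read-only mapping path below exists because ordinary CPU reads from
 * an uncached or write-combined mapping are very slow; SSE4.1 MOVNTDQA
 * streaming loads, as used by _mesa_streaming_load_memcpy(), pull the data
 * out at close to memcpy speed instead.
 */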
3011 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
3013 #if defined(USE_SSE41)
3015 intel_miptree_map_movntdqa(struct brw_context
*brw
,
3016 struct intel_mipmap_tree
*mt
,
3017 struct intel_miptree_map
*map
,
3018 unsigned int level
, unsigned int slice
)
3020 assert(map
->mode
& GL_MAP_READ_BIT
);
3021 assert(!(map
->mode
& GL_MAP_WRITE_BIT
));
3023 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__
,
3024 map
->x
, map
->y
, map
->w
, map
->h
,
3025 mt
, _mesa_get_format_name(mt
->format
),
3026 level
, slice
, map
->ptr
, map
->stride
);
3028 /* Map the original image */
3031 intel_miptree_get_image_offset(mt
, level
, slice
, &image_x
, &image_y
);
3035 void *src
= intel_miptree_map_raw(brw
, mt
, map
->mode
);
3041 src
+= image_y
* mt
->surf
.row_pitch
;
3042 src
+= image_x
* mt
->cpp
;
3044 /* Due to the pixel offsets for the particular image being mapped, our
3045 * src pointer may not be 16-byte aligned. However, if the pitch is
3046 * divisible by 16, then the amount by which it's misaligned will remain
3047 * consistent from row to row.
3049 assert((mt
->surf
.row_pitch
% 16) == 0);
3050 const int misalignment
= ((uintptr_t) src
) & 15;
3052 /* Create an untiled temporary buffer for the mapping. */
3053 const unsigned width_bytes
= _mesa_format_row_stride(mt
->format
, map
->w
);
3055 map
->stride
= ALIGN(misalignment
+ width_bytes
, 16);
3057 map
->buffer
= _mesa_align_malloc(map
->stride
* map
->h
, 16);
3058 /* Offset the destination so it has the same misalignment as src. */
3059 map
->ptr
= map
->buffer
+ misalignment
;
3061 assert((((uintptr_t) map
->ptr
) & 15) == misalignment
);
3063 for (uint32_t y
= 0; y
< map
->h
; y
++) {
3064 void *dst_ptr
= map
->ptr
+ y
* map
->stride
;
3065 void *src_ptr
= src
+ y
* mt
->surf
.row_pitch
;
3067 _mesa_streaming_load_memcpy(dst_ptr
, src_ptr
, width_bytes
);
3070 intel_miptree_unmap_raw(mt
);
3074 intel_miptree_unmap_movntdqa(struct brw_context
*brw
,
3075 struct intel_mipmap_tree
*mt
,
3076 struct intel_miptree_map
*map
,
3080 _mesa_align_free(map
->buffer
);
static void
intel_miptree_map_s8(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_miptree_unmap_raw(mt);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level,
                       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);

      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
                                               image_x + x + map->x,
                                               image_y + y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
      }

      intel_miptree_unmap_raw(mt);
   }

   free(map->buffer);
}
static void
intel_miptree_map_etc(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level,
                      unsigned int slice)
{
   assert(mt->etc_format != MESA_FORMAT_NONE);
   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
   }

   assert(map->mode & GL_MAP_WRITE_BIT);
   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);

   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
                                                map->w, map->h, 1));
   map->ptr = map->buffer;
}

static void
intel_miptree_unmap_etc(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   uint32_t image_x;
   uint32_t image_y;
   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);

   image_x += map->x;
   image_y += map->y;

   uint8_t *dst = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT)
                + image_y * mt->surf.row_pitch
                + image_x * mt->cpp;

   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8)
      _mesa_etc1_unpack_rgba8888(dst, mt->surf.row_pitch,
                                 map->ptr, map->stride,
                                 map->w, map->h);
   else
      _mesa_unpack_etc2_format(dst, mt->surf.row_pitch,
                               map->ptr, map->stride,
                               map->w, map->h, mt->etc_format);

   intel_miptree_unmap_raw(mt);
   free(map->buffer);
}
/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
 * separate from the depth buffer.  Yet at the GL API level, we have to expose
 * packed depth/stencil textures and FBO attachments, and Mesa core expects to
 * be able to map that memory for texture storage and glReadPixels-type
 * operations.  We give Mesa core that access by mallocing a temporary and
 * copying the data between the actual backing store and the temporary.
 */
static void
intel_miptree_map_depthstencil(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               struct intel_miptree_map *map,
                               unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((map_y + z_image_y) *
                                  (z_mt->surf.row_pitch / 4) +
                                  (map_x + z_image_x));
            uint8_t s = s_map[s_offset];
            uint32_t z = z_map[z_offset];

            if (map_z32f_x24s8) {
               packed_map[(y * map->w + x) * 2 + 0] = z;
               packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
               packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
static void
intel_miptree_unmap_depthstencil(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 struct intel_miptree_map *map,
                                 unsigned int level,
                                 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, slice,
                                     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, slice,
                                     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
            ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                  (z_mt->surf.row_pitch / 4) +
                                  (x + z_image_x + map->x));

            if (map_z32f_x24s8) {
               z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
               s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
               uint32_t packed = packed_map[y * map->w + x];
               s_map[s_offset] = packed >> 24;
               z_map[z_offset] = packed;
            }
         }
      }

      intel_miptree_unmap_raw(s_mt);
      intel_miptree_unmap_raw(z_mt);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
          z_mt, _mesa_get_format_name(z_mt->format),
          map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
   }

   free(map->buffer);
}
/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attachment point if successful, NULL otherwise.
 */
static struct intel_miptree_map *
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}
/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}
static bool
can_blit_slice(struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int slice)
{
   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->surf.row_pitch >= 32768)
      return false;

   return true;
}
static bool
use_intel_mipree_map_blit(struct brw_context *brw,
                          struct intel_mipmap_tree *mt,
                          GLbitfield mode,
                          unsigned int level,
                          unsigned int slice)
{
   if (brw->has_llc &&
       /* It's probably not worth swapping to the blit ring because of
        * all the overhead involved.
        */
       !(mode & GL_MAP_WRITE_BIT) &&
       !mt->compressed &&
       (mt->surf.tiling == ISL_TILING_X ||
        /* Prior to Sandybridge, the blitter can't handle Y tiling */
        (brw->gen >= 6 && mt->surf.tiling == ISL_TILING_Y0) ||
        /* Fast copy blit on skl+ supports all tiling formats. */
        brw->gen >= 9) &&
       can_blit_slice(mt, level, slice))
      return true;

   if (mt->surf.tiling != ISL_TILING_LINEAR &&
       mt->bo->size >= brw->max_gtt_map_object_size) {
      assert(can_blit_slice(mt, level, slice));
      return true;
   }

   return false;
}
/**
 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may
 * exceed 32 bits but to diminish the likelihood of subtle bugs caused by
 * pointer arithmetic overflow.
 *
 * If you call this function and use \a out_stride, then you're doing pointer
 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all
 * bugs.  The caller must still take care to avoid 32-bit overflow errors in
 * all arithmetic expressions that contain buffer offsets and pixel sizes,
 * which usually have type uint32_t or GLuint.
 */
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  ptrdiff_t *out_stride)
{
   struct intel_miptree_map *map;

   assert(mt->surf.samples == 1);

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   intel_miptree_access_raw(brw, mt, level, slice,
                            map->mode & GL_MAP_WRITE_BIT);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_map_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_map_depthstencil(brw, mt, map, level, slice);
   } else if (use_intel_mipree_map_blit(brw, mt, mode, level, slice)) {
      intel_miptree_map_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (!(mode & GL_MAP_WRITE_BIT) &&
              !mt->compressed && cpu_has_sse4_1 &&
              (mt->surf.row_pitch % 16 == 0)) {
      intel_miptree_map_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_map_gtt(brw, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   assert(mt->surf.samples == 1);

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S_UINT8) {
      intel_miptree_unmap_s8(brw, mt, map, level, slice);
   } else if (mt->etc_format != MESA_FORMAT_NONE &&
              !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_etc(brw, mt, map, level, slice);
   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
   } else if (map->linear_mt) {
      intel_miptree_unmap_blit(brw, mt, map, level, slice);
#if defined(USE_SSE41)
   } else if (map->buffer && cpu_has_sse4_1) {
      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
#endif
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}
enum isl_surf_dim
get_isl_surf_dim(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY:
      return ISL_SURF_DIM_1D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY:
   case GL_TEXTURE_RECTANGLE:
   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
   case GL_TEXTURE_EXTERNAL_OES:
      return ISL_SURF_DIM_2D;

   case GL_TEXTURE_3D:
      return ISL_SURF_DIM_3D;
   }

   unreachable("Invalid texture target");
}
enum isl_dim_layout
get_isl_dim_layout(const struct gen_device_info *devinfo,
                   enum isl_tiling tiling, GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY:
      return (devinfo->gen >= 9 && tiling == ISL_TILING_LINEAR ?
              ISL_DIM_LAYOUT_GEN9_1D : ISL_DIM_LAYOUT_GEN4_2D);

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY:
   case GL_TEXTURE_RECTANGLE:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
   case GL_TEXTURE_EXTERNAL_OES:
      return ISL_DIM_LAYOUT_GEN4_2D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return (devinfo->gen == 4 ? ISL_DIM_LAYOUT_GEN4_3D :
              ISL_DIM_LAYOUT_GEN4_2D);

   case GL_TEXTURE_3D:
      return (devinfo->gen >= 9 ?
              ISL_DIM_LAYOUT_GEN4_2D : ISL_DIM_LAYOUT_GEN4_3D);
   }

   unreachable("Invalid texture target");
}
enum isl_aux_usage
intel_miptree_get_aux_isl_usage(const struct brw_context *brw,
                                const struct intel_mipmap_tree *mt)
{
   if (mt->hiz_buf)
      return ISL_AUX_USAGE_HIZ;

   if (!mt->mcs_buf)
      return ISL_AUX_USAGE_NONE;

   return mt->aux_usage;
}