/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_resource.c
 *
 * Resources are images, buffers, and other objects used by the GPU.
 *
 * XXX: explain resources
 */
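/* Broadly speaking, an iris_resource pairs a GPU buffer object (res->bo)
 * with an ISL surface layout (res->surf) and optional auxiliary surfaces
 * (res->aux) such as HiZ, MCS, or CCS, on top of the Gallium pipe_resource
 * base that the state tracker sees.
 */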
#include <stdio.h>

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/os_memory.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_threaded_context.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/ralloc.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "intel/dev/gen_debug.h"
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/i915_drm.h"
enum modifier_priority {
   MODIFIER_PRIORITY_INVALID = 0,
   MODIFIER_PRIORITY_LINEAR,
   MODIFIER_PRIORITY_X,
   MODIFIER_PRIORITY_Y,
   MODIFIER_PRIORITY_Y_CCS,
};
static const uint64_t priority_to_modifier[] = {
   [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
   [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
   [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
   [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
   [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
};
static bool
modifier_is_supported(const struct gen_device_info *devinfo,
                      uint64_t modifier)
{
   /* XXX: do something real */
   switch (modifier) {
   case I915_FORMAT_MOD_Y_TILED:
   case I915_FORMAT_MOD_X_TILED:
   case DRM_FORMAT_MOD_LINEAR:
      return true;
   case I915_FORMAT_MOD_Y_TILED_CCS:
   case DRM_FORMAT_MOD_INVALID:
   default:
      return false;
   }
}
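/* Note that I915_FORMAT_MOD_Y_TILED_CCS is grouped with
 * DRM_FORMAT_MOD_INVALID above rather than with the supported modifiers;
 * it is likewise commented out as broken in iris_query_dmabuf_modifiers()
 * below.
 */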
static uint64_t
select_best_modifier(struct gen_device_info *devinfo,
                     const uint64_t *modifiers,
                     int count)
{
   enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;

   for (int i = 0; i < count; i++) {
      if (!modifier_is_supported(devinfo, modifiers[i]))
         continue;

      switch (modifiers[i]) {
      case I915_FORMAT_MOD_Y_TILED_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
         break;
      case I915_FORMAT_MOD_Y_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y);
         break;
      case I915_FORMAT_MOD_X_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_X);
         break;
      case DRM_FORMAT_MOD_LINEAR:
         prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
         break;
      case DRM_FORMAT_MOD_INVALID:
      default:
         break;
      }
   }

   return priority_to_modifier[prio];
}
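/* For example, offered { DRM_FORMAT_MOD_LINEAR, I915_FORMAT_MOD_X_TILED,
 * I915_FORMAT_MOD_Y_TILED }, the loop raises prio to MODIFIER_PRIORITY_Y and
 * priority_to_modifier[] yields I915_FORMAT_MOD_Y_TILED.  An empty or wholly
 * unsupported list leaves prio at MODIFIER_PRIORITY_INVALID, which maps to
 * DRM_FORMAT_MOD_INVALID.
 */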
static enum isl_surf_dim
target_to_isl_surf_dim(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return ISL_SURF_DIM_1D;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return ISL_SURF_DIM_2D;
   case PIPE_TEXTURE_3D:
      return ISL_SURF_DIM_3D;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }
   unreachable("invalid texture type");
}
static void
iris_query_dmabuf_modifiers(struct pipe_screen *pscreen,
                            enum pipe_format pfmt,
                            int max,
                            uint64_t *modifiers,
                            unsigned int *external_only,
                            int *count)
{
   struct iris_screen *screen = (void *) pscreen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   uint64_t all_modifiers[] = {
      DRM_FORMAT_MOD_LINEAR,
      I915_FORMAT_MOD_X_TILED,
      I915_FORMAT_MOD_Y_TILED,
      // XXX: (broken) I915_FORMAT_MOD_Y_TILED_CCS,
   };

   int supported_mods = 0;

   for (int i = 0; i < ARRAY_SIZE(all_modifiers); i++) {
      if (!modifier_is_supported(devinfo, all_modifiers[i]))
         continue;

      if (supported_mods < max) {
         if (modifiers)
            modifiers[supported_mods] = all_modifiers[i];

         if (external_only)
            external_only[supported_mods] = util_format_is_yuv(pfmt);
      }

      supported_mods++;
   }

   *count = supported_mods;
}
static isl_surf_usage_flags_t
pipe_bind_to_isl_usage(unsigned bindings)
{
   isl_surf_usage_flags_t usage = 0;

   if (bindings & PIPE_BIND_RENDER_TARGET)
      usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;

   if (bindings & PIPE_BIND_SAMPLER_VIEW)
      usage |= ISL_SURF_USAGE_TEXTURE_BIT;

   if (bindings & (PIPE_BIND_SHADER_IMAGE | PIPE_BIND_SHADER_BUFFER))
      usage |= ISL_SURF_USAGE_STORAGE_BIT;

   if (bindings & PIPE_BIND_DISPLAY_TARGET)
      usage |= ISL_SURF_USAGE_DISPLAY_BIT;

   return usage;
}
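/* For example, a resource bound with PIPE_BIND_RENDER_TARGET |
 * PIPE_BIND_SAMPLER_VIEW maps to ISL_SURF_USAGE_RENDER_TARGET_BIT |
 * ISL_SURF_USAGE_TEXTURE_BIT; bind flags not handled above contribute
 * nothing to the ISL usage mask.
 */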
struct pipe_resource *
iris_resource_get_separate_stencil(struct pipe_resource *p_res)
{
   /* For packed depth-stencil, we treat depth as the primary resource
    * and store S8 as the "second plane" resource.
    */
   return p_res->next;
}
static void
iris_resource_set_separate_stencil(struct pipe_resource *p_res,
                                   struct pipe_resource *stencil)
{
   assert(util_format_has_depth(util_format_description(p_res->format)));
   pipe_resource_reference(&p_res->next, stencil);
}
void
iris_get_depth_stencil_resources(struct pipe_resource *res,
                                 struct iris_resource **out_z,
                                 struct iris_resource **out_s)
{
   if (res->format != PIPE_FORMAT_S8_UINT) {
      *out_z = (void *) res;
      *out_s = (void *) iris_resource_get_separate_stencil(res);
   } else {
      *out_z = NULL;
      *out_s = (void *) res;
   }
}
static void
iris_resource_disable_aux(struct iris_resource *res)
{
   iris_bo_unreference(res->aux.bo);
   iris_bo_unreference(res->aux.clear_color_bo);
   free(res->aux.state);

   res->aux.usage = ISL_AUX_USAGE_NONE;
   res->aux.possible_usages = 1 << ISL_AUX_USAGE_NONE;
   res->aux.sampler_usages = 1 << ISL_AUX_USAGE_NONE;
   res->aux.surf.size_B = 0;
   res->aux.bo = NULL;
   res->aux.clear_color_bo = NULL;
   res->aux.state = NULL;
}
static void
iris_resource_destroy(struct pipe_screen *screen,
                      struct pipe_resource *resource)
{
   struct iris_resource *res = (struct iris_resource *)resource;

   if (resource->target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   iris_resource_disable_aux(res);

   iris_bo_unreference(res->bo);
   free(res);
}
static struct iris_resource *
iris_alloc_resource(struct pipe_screen *pscreen,
                    const struct pipe_resource *templ)
{
   struct iris_resource *res = calloc(1, sizeof(struct iris_resource));
   if (!res)
      return NULL;

   res->base = *templ;
   res->base.screen = pscreen;
   pipe_reference_init(&res->base.reference, 1);

   res->aux.possible_usages = 1 << ISL_AUX_USAGE_NONE;
   res->aux.sampler_usages = 1 << ISL_AUX_USAGE_NONE;

   if (templ->target == PIPE_BUFFER)
      util_range_init(&res->valid_buffer_range);

   return res;
}
static unsigned
iris_get_num_logical_layers(const struct iris_resource *res, unsigned level)
{
   if (res->surf.dim == ISL_SURF_DIM_3D)
      return minify(res->surf.logical_level0_px.depth, level);
   else
      return res->surf.logical_level0_px.array_len;
}
static enum isl_aux_state **
create_aux_state_map(struct iris_resource *res, enum isl_aux_state initial)
{
   uint32_t total_slices = 0;
   for (uint32_t level = 0; level < res->surf.levels; level++)
      total_slices += iris_get_num_logical_layers(res, level);

   const size_t per_level_array_size =
      res->surf.levels * sizeof(enum isl_aux_state *);

   /* We're going to allocate a single chunk of data for both the per-level
    * reference array and the arrays of aux_state.  This makes cleanup
    * significantly easier.
    */
   const size_t total_size =
      per_level_array_size + total_slices * sizeof(enum isl_aux_state);

   void *data = malloc(total_size);
   if (!data)
      return NULL;

   enum isl_aux_state **per_level_arr = data;
   enum isl_aux_state *s = data + per_level_array_size;
   for (uint32_t level = 0; level < res->surf.levels; level++) {
      per_level_arr[level] = s;
      const unsigned level_layers = iris_get_num_logical_layers(res, level);
      for (uint32_t a = 0; a < level_layers; a++)
         *(s++) = initial;
   }
   assert((void *)s == data + total_size);

   return per_level_arr;
}
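/* Rough layout of the single allocation above, for a surface with N levels:
 *
 *    [ level 0 ptr | ... | level N-1 ptr | level 0 states | level 1 states | ... ]
 *
 * per_level_arr[level] points into the tail region, and every per-slice
 * state starts out as `initial`.
 */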
/**
 * Allocate the initial aux surface for a resource based on aux.usage
 */
static bool
iris_resource_alloc_aux(struct iris_screen *screen, struct iris_resource *res)
{
   struct isl_device *isl_dev = &screen->isl_dev;
   enum isl_aux_state initial_state;
   UNUSED bool ok = false;
   uint8_t memset_value = 0;
   uint32_t alloc_flags = 0;
   const struct gen_device_info *devinfo = &screen->devinfo;
   const unsigned clear_color_state_size = devinfo->gen >= 10 ?
      screen->isl_dev.ss.clear_color_state_size :
      (devinfo->gen >= 9 ? screen->isl_dev.ss.clear_value_size : 0);

   assert(!res->aux.bo);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      res->aux.surf.size_B = 0;
      break;
   case ISL_AUX_USAGE_HIZ:
      initial_state = ISL_AUX_STATE_AUX_INVALID;
      ok = isl_surf_get_hiz_surf(isl_dev, &res->surf, &res->aux.surf);
      break;
   case ISL_AUX_USAGE_MCS:
      /* The Ivybridge PRM, Vol 2 Part 1 p326 says:
       *
       *    "When MCS buffer is enabled and bound to MSRT, it is required
       *     that it is cleared prior to any rendering."
       *
       * Since we only use the MCS buffer for rendering, we just clear it
       * immediately on allocation.  The clear value for MCS buffers is all
       * 1's, so we simply memset it to 0xff.
       */
      initial_state = ISL_AUX_STATE_CLEAR;
      memset_value = 0xFF;
      ok = isl_surf_get_mcs_surf(isl_dev, &res->surf, &res->aux.surf);
      break;
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* When CCS_E is used, we need to ensure that the CCS starts off in
       * a valid state.  From the Sky Lake PRM, "MCS Buffer for Render
       * Target(s)":
       *
       *    "If Software wants to enable Color Compression without Fast
       *     clear, Software needs to initialize MCS with zeros."
       *
       * A CCS value of 0 indicates that the corresponding block is in the
       * pass-through state which is what we want.
       *
       * For CCS_D, do the same thing.  On Gen9+, this avoids having any
       * undefined bits in the aux buffer.
       */
      initial_state = ISL_AUX_STATE_PASS_THROUGH;
      alloc_flags |= BO_ALLOC_ZEROED;
      ok = isl_surf_get_ccs_surf(isl_dev, &res->surf, &res->aux.surf, 0);
      break;
   }

   /* We should have a valid aux_surf. */
   assert(ok);

   /* No work is needed for a zero-sized auxiliary buffer. */
   if (res->aux.surf.size_B == 0)
      return true;

   /* Create the aux_state for the auxiliary buffer. */
   res->aux.state = create_aux_state_map(res, initial_state);
   if (!res->aux.state)
      return false;

   uint64_t size = res->aux.surf.size_B;

   /* Allocate space in the buffer for storing the clear color.  On modern
    * platforms (gen > 9), we can read it directly from that buffer.
    *
    * On gen <= 9, we are going to store the clear color in the buffer
    * anyway, and copy it back to the surface state during state emission.
    */
   res->aux.clear_color_offset = size;
   size += clear_color_state_size;

   /* Allocate the auxiliary buffer.  ISL has a stricter set of alignment
    * rules than the drm allocator.  Therefore, one can pass the ISL
    * dimensions in terms of bytes instead of trying to recalculate based on
    * different format block sizes.
    */
   res->aux.bo = iris_bo_alloc_tiled(screen->bufmgr, "aux buffer", size,
                                     IRIS_MEMZONE_OTHER, I915_TILING_Y,
                                     res->aux.surf.row_pitch_B, alloc_flags);
   if (!res->aux.bo)
      return false;

   if (!(alloc_flags & BO_ALLOC_ZEROED)) {
      void *map = iris_bo_map(NULL, res->aux.bo, MAP_WRITE | MAP_RAW);
      if (!map) {
         iris_resource_disable_aux(res);
         return false;
      }

      if (memset_value != 0)
         memset(map, memset_value, res->aux.surf.size_B);

      /* Zero the indirect clear color to match ::fast_clear_color. */
      memset((char *)map + res->aux.clear_color_offset, 0,
             clear_color_state_size);

      iris_bo_unmap(res->aux.bo);
   }

   if (clear_color_state_size > 0) {
      res->aux.clear_color_bo = res->aux.bo;
      iris_bo_reference(res->aux.clear_color_bo);
   }

   if (res->aux.usage == ISL_AUX_USAGE_HIZ) {
      for (unsigned level = 0; level < res->surf.levels; ++level) {
         uint32_t width = u_minify(res->surf.phys_level0_sa.width, level);
         uint32_t height = u_minify(res->surf.phys_level0_sa.height, level);

         /* Disable HiZ for LOD > 0 unless the width/height are 8x4 aligned.
          * For LOD == 0, we can grow the dimensions to make it work.
          */
         if (level == 0 || ((width & 7) == 0 && (height & 3) == 0))
            res->aux.has_hiz |= 1 << level;
      }
   }

   return true;
}
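/* Note that res->aux.has_hiz ends up as a per-LOD bitmask: bit N set means
 * HiZ may be used for miplevel N, so HiZ can stay enabled for the base level
 * even when smaller, unaligned miplevels have it disabled.
 */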
static bool
supports_mcs(const struct isl_surf *surf)
{
   /* MCS compression only applies to multisampled resources. */
   if (surf->samples <= 1)
      return false;

   /* Depth and stencil buffers use the IMS (interleaved) layout. */
   if (isl_surf_usage_is_depth_or_stencil(surf->usage))
      return false;

   return true;
}
static bool
supports_ccs(const struct gen_device_info *devinfo,
             const struct isl_surf *surf)
{
   /* CCS only supports single-sampled resources. */
   if (surf->samples > 1)
      return false;

   /* Note: still need to check the format! */

   return true;
}
static struct pipe_resource *
iris_resource_create_for_buffer(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   assert(templ->target == PIPE_BUFFER);
   assert(templ->height0 <= 1);
   assert(templ->depth0 <= 1);
   assert(templ->format == PIPE_FORMAT_NONE ||
          util_format_get_blocksize(templ->format) == 1);

   res->internal_format = templ->format;
   res->surf.tiling = ISL_TILING_LINEAR;

   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
   const char *name = templ->target == PIPE_BUFFER ? "buffer" : "miptree";
   if (templ->flags & IRIS_RESOURCE_FLAG_SHADER_MEMZONE) {
      memzone = IRIS_MEMZONE_SHADER;
      name = "shader kernels";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_SURFACE_MEMZONE) {
      memzone = IRIS_MEMZONE_SURFACE;
      name = "surface state";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE) {
      memzone = IRIS_MEMZONE_DYNAMIC;
      name = "dynamic state";
   }

   res->bo = iris_bo_alloc(screen->bufmgr, name, templ->width0, memzone);
   if (!res->bo) {
      iris_resource_destroy(pscreen, &res->base);
      return NULL;
   }

   return &res->base;
}
static struct pipe_resource *
iris_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                    const struct pipe_resource *templ,
                                    const uint64_t *modifiers,
                                    int modifiers_count)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   const struct util_format_description *format_desc =
      util_format_description(templ->format);
   const bool has_depth = util_format_has_depth(format_desc);
   uint64_t modifier =
      select_best_modifier(devinfo, modifiers, modifiers_count);

   isl_tiling_flags_t tiling_flags = ISL_TILING_ANY_MASK;

   if (modifier != DRM_FORMAT_MOD_INVALID) {
      res->mod_info = isl_drm_modifier_get_info(modifier);

      tiling_flags = 1 << res->mod_info->tiling;
   } else {
      if (modifiers_count > 0) {
         fprintf(stderr, "Unsupported modifier, resource creation failed.\n");
         return NULL;
      }

      /* No modifiers - we can select our own tiling. */

      if (has_depth) {
         /* Depth must be Y-tiled */
         tiling_flags = ISL_TILING_Y0_BIT;
      } else if (templ->format == PIPE_FORMAT_S8_UINT) {
         /* Stencil must be W-tiled */
         tiling_flags = ISL_TILING_W_BIT;
      } else if (templ->target == PIPE_BUFFER ||
                 templ->target == PIPE_TEXTURE_1D ||
                 templ->target == PIPE_TEXTURE_1D_ARRAY) {
         /* Use linear for buffers and 1D textures */
         tiling_flags = ISL_TILING_LINEAR_BIT;
      }

      /* Use linear for staging buffers */
      if (templ->usage == PIPE_USAGE_STAGING ||
          templ->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
         tiling_flags = ISL_TILING_LINEAR_BIT;
   }

   isl_surf_usage_flags_t usage = pipe_bind_to_isl_usage(templ->bind);

   if (templ->target == PIPE_TEXTURE_CUBE ||
       templ->target == PIPE_TEXTURE_CUBE_ARRAY)
      usage |= ISL_SURF_USAGE_CUBE_BIT;

   if (templ->usage != PIPE_USAGE_STAGING) {
      if (templ->format == PIPE_FORMAT_S8_UINT)
         usage |= ISL_SURF_USAGE_STENCIL_BIT;
      else if (has_depth)
         usage |= ISL_SURF_USAGE_DEPTH_BIT;
   }

   enum pipe_format pfmt = templ->format;
   res->internal_format = pfmt;

   /* Should be handled by u_transfer_helper */
   assert(!util_format_is_depth_and_stencil(pfmt));

   struct iris_format_info fmt = iris_format_for_usage(devinfo, pfmt, usage);
   assert(fmt.fmt != ISL_FORMAT_UNSUPPORTED);

   UNUSED const bool isl_surf_created_successfully =
      isl_surf_init(&screen->isl_dev, &res->surf,
                    .dim = target_to_isl_surf_dim(templ->target),
                    .format = fmt.fmt,
                    .width = templ->width0,
                    .height = templ->height0,
                    .depth = templ->depth0,
                    .levels = templ->last_level + 1,
                    .array_len = templ->array_size,
                    .samples = MAX2(templ->nr_samples, 1),
                    .min_alignment_B = 0,
                    .row_pitch_B = 0,
                    .usage = usage,
                    .tiling_flags = tiling_flags);
   assert(isl_surf_created_successfully);

   if (res->mod_info) {
      res->aux.possible_usages |= 1 << res->mod_info->aux_usage;
   } else if (supports_mcs(&res->surf)) {
      res->aux.possible_usages |= 1 << ISL_AUX_USAGE_MCS;
   } else if (has_depth) {
      if (likely(!(INTEL_DEBUG & DEBUG_NO_HIZ)))
         res->aux.possible_usages |= 1 << ISL_AUX_USAGE_HIZ;
   } else if (likely(!(INTEL_DEBUG & DEBUG_NO_RBC)) &&
              supports_ccs(devinfo, &res->surf)) {
      if (isl_format_supports_ccs_e(devinfo, res->surf.format))
         res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_E;
      else if (isl_format_supports_ccs_d(devinfo, res->surf.format))
         res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_D;
   }

   res->aux.usage = util_last_bit(res->aux.possible_usages) - 1;

   res->aux.sampler_usages = res->aux.possible_usages;

   /* We don't always support sampling with hiz.  But when we do, it must be
    * single sampled.
    */
   if (!devinfo->has_sample_with_hiz || res->surf.samples > 1) {
      res->aux.sampler_usages &= ~(1 << ISL_AUX_USAGE_HIZ);
   }

   const char *name = "miptree";
   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;

   unsigned int flags = 0;
   if (templ->usage == PIPE_USAGE_STAGING)
      flags |= BO_ALLOC_COHERENT;

   /* These are for u_upload_mgr buffers only */
   assert(!(templ->flags & (IRIS_RESOURCE_FLAG_SHADER_MEMZONE |
                            IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
                            IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE)));

   res->bo = iris_bo_alloc_tiled(screen->bufmgr, name, res->surf.size_B,
                                 memzone,
                                 isl_tiling_to_i915_tiling(res->surf.tiling),
                                 res->surf.row_pitch_B, flags);
   if (!res->bo)
      goto fail;

   if (!iris_resource_alloc_aux(screen, res))
      iris_resource_disable_aux(res);

   return &res->base;

fail:
   fprintf(stderr, "XXX: resource creation failed\n");
   iris_resource_destroy(pscreen, &res->base);
   return NULL;
}
static struct pipe_resource *
iris_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER)
      return iris_resource_create_for_buffer(pscreen, templ);
   else
      return iris_resource_create_with_modifiers(pscreen, templ, NULL, 0);
}
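/* With no modifier list (NULL, 0), select_best_modifier() returns
 * DRM_FORMAT_MOD_INVALID, so iris_resource_create_with_modifiers() takes the
 * "No modifiers - we can select our own tiling" path above.
 */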
static uint64_t
tiling_to_modifier(uint32_t tiling)
{
   static const uint64_t map[] = {
      [I915_TILING_NONE] = DRM_FORMAT_MOD_LINEAR,
      [I915_TILING_X] = I915_FORMAT_MOD_X_TILED,
      [I915_TILING_Y] = I915_FORMAT_MOD_Y_TILED,
   };

   assert(tiling < ARRAY_SIZE(map));

   return map[tiling];
}
static struct pipe_resource *
iris_resource_from_user_memory(struct pipe_screen *pscreen,
                               const struct pipe_resource *templ,
                               void *user_memory)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   assert(templ->target == PIPE_BUFFER);

   res->internal_format = templ->format;
   res->bo = iris_bo_create_userptr(bufmgr, "user",
                                    user_memory, templ->width0,
                                    IRIS_MEMZONE_OTHER);
   if (!res->bo) {
      free(res);
      return NULL;
   }

   util_range_add(&res->valid_buffer_range, 0, templ->width0);

   return &res->base;
}
static struct pipe_resource *
iris_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct winsys_handle *whandle,
                          unsigned usage)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_FD:
      res->bo = iris_bo_import_dmabuf(bufmgr, whandle->handle);
      break;
   case WINSYS_HANDLE_TYPE_SHARED:
      res->bo = iris_bo_gem_create_from_name(bufmgr, "winsys image",
                                             whandle->handle);
      break;
   default:
      unreachable("invalid winsys handle type");
   }
   if (!res->bo)
      return NULL;

   res->offset = whandle->offset;

   uint64_t modifier = whandle->modifier;
   if (modifier == DRM_FORMAT_MOD_INVALID) {
      modifier = tiling_to_modifier(res->bo->tiling_mode);
   }
   res->mod_info = isl_drm_modifier_get_info(modifier);
   assert(res->mod_info);

   isl_surf_usage_flags_t isl_usage = pipe_bind_to_isl_usage(templ->bind);

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, templ->format, isl_usage);
   res->internal_format = templ->format;

   if (templ->target == PIPE_BUFFER) {
      res->surf.tiling = ISL_TILING_LINEAR;
   } else {
      isl_surf_init(&screen->isl_dev, &res->surf,
                    .dim = target_to_isl_surf_dim(templ->target),
                    .format = fmt.fmt,
                    .width = templ->width0,
                    .height = templ->height0,
                    .depth = templ->depth0,
                    .levels = templ->last_level + 1,
                    .array_len = templ->array_size,
                    .samples = MAX2(templ->nr_samples, 1),
                    .min_alignment_B = 0,
                    .row_pitch_B = whandle->stride,
                    .usage = isl_usage,
                    .tiling_flags = 1 << res->mod_info->tiling);

      assert(res->bo->tiling_mode ==
             isl_tiling_to_i915_tiling(res->surf.tiling));

      // XXX: create_ccs_buf_for_image?
      if (!iris_resource_alloc_aux(screen, res))
         goto fail;
   }

   return &res->base;

fail:
   iris_resource_destroy(pscreen, &res->base);
   return NULL;
}
static void
iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
   struct iris_resource *res = (void *) resource;
   const struct isl_drm_modifier_info *mod = res->mod_info;

   iris_resource_prepare_access(ice, render_batch, res,
                                0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                mod ? mod->aux_usage : ISL_AUX_USAGE_NONE,
                                mod ? mod->supports_clear_color : false);
}
static boolean
iris_resource_get_handle(struct pipe_screen *pscreen,
                         struct pipe_context *ctx,
                         struct pipe_resource *resource,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
   struct iris_resource *res = (struct iris_resource *)resource;

   /* Disable aux usage if explicit flush is not set and this is the
    * first time we are dealing with this resource.
    */
   if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) && res->aux.usage != 0) {
      if (p_atomic_read(&resource->reference.count) == 1)
         iris_resource_disable_aux(res);
   }

   /* If this is a buffer, stride should be 0 - no need to special case */
   whandle->stride = res->surf.row_pitch_B;
   whandle->modifier =
      res->mod_info ? res->mod_info->modifier
                    : tiling_to_modifier(res->bo->tiling_mode);

#ifndef NDEBUG
   enum isl_aux_usage allowed_usage =
      res->mod_info ? res->mod_info->aux_usage : ISL_AUX_USAGE_NONE;

   if (res->aux.usage != allowed_usage) {
      enum isl_aux_state aux_state = iris_resource_get_aux_state(res, 0, 0);
      assert(aux_state == ISL_AUX_STATE_RESOLVED ||
             aux_state == ISL_AUX_STATE_PASS_THROUGH);
   }
#endif

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
      return iris_bo_flink(res->bo, &whandle->handle) == 0;
   case WINSYS_HANDLE_TYPE_KMS:
      whandle->handle = iris_bo_export_gem_handle(res->bo);
      return true;
   case WINSYS_HANDLE_TYPE_FD:
      return iris_bo_export_dmabuf(res->bo, (int *) &whandle->handle) == 0;
   default:
      return false;
   }
}
static bool
resource_is_busy(struct iris_context *ice,
                 struct iris_resource *res)
{
   bool busy = iris_bo_busy(res->bo);

   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
      busy |= iris_batch_references(&ice->batches[i], res->bo);

   return busy;
}
static void
iris_invalidate_resource(struct pipe_context *ctx,
                         struct pipe_resource *resource)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (void *) ctx;
   struct iris_resource *res = (void *) resource;

   if (resource->target != PIPE_BUFFER)
      return;

   if (!resource_is_busy(ice, res)) {
      /* The resource is idle, so just mark that it contains no data and
       * keep using the same underlying buffer object.
       */
      util_range_set_empty(&res->valid_buffer_range);
      return;
   }

   /* Otherwise, try and replace the backing storage with a new BO. */

   /* We can't reallocate memory we didn't allocate in the first place. */
   if (res->bo->userptr)
      return;

   // XXX: We should support this.
   if (res->bind_history & PIPE_BIND_STREAM_OUTPUT)
      return;

   struct iris_bo *old_bo = res->bo;
   struct iris_bo *new_bo =
      iris_bo_alloc(screen->bufmgr, res->bo->name, resource->width0,
                    iris_memzone_for_address(old_bo->gtt_offset));
   if (!new_bo)
      return;

   /* Swap out the backing storage */
   res->bo = new_bo;

   /* Rebind the buffer, replacing any state referring to the old BO's
    * address, and marking state dirty so it's reemitted.
    */
   ice->vtbl.rebind_buffer(ice, res, old_bo->gtt_offset);

   util_range_set_empty(&res->valid_buffer_range);

   iris_bo_unreference(old_bo);
}
static void
iris_flush_staging_region(struct pipe_transfer *xfer,
                          const struct pipe_box *flush_box)
{
   if (!(xfer->usage & PIPE_TRANSFER_WRITE))
      return;

   struct iris_transfer *map = (void *) xfer;

   struct pipe_box src_box = *flush_box;

   /* Account for extra alignment padding in staging buffer */
   if (xfer->resource->target == PIPE_BUFFER)
      src_box.x += xfer->box.x % IRIS_MAP_BUFFER_ALIGNMENT;

   struct pipe_box dst_box = (struct pipe_box) {
      .x = xfer->box.x + flush_box->x,
      .y = xfer->box.y + flush_box->y,
      .z = xfer->box.z + flush_box->z,
      .width = flush_box->width,
      .height = flush_box->height,
      .depth = flush_box->depth,
   };

   iris_copy_region(map->blorp, map->batch, xfer->resource, xfer->level,
                    dst_box.x, dst_box.y, dst_box.z, map->staging, 0,
                    &src_box);
}
static void
iris_unmap_copy_region(struct iris_transfer *map)
{
   iris_resource_destroy(map->staging->screen, map->staging);
}
static void
iris_map_copy_region(struct iris_transfer *map)
{
   struct pipe_screen *pscreen = &map->batch->screen->base;
   struct pipe_transfer *xfer = &map->base;
   struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (void *) xfer->resource;

   unsigned extra = xfer->resource->target == PIPE_BUFFER ?
                    box->x % IRIS_MAP_BUFFER_ALIGNMENT : 0;

   struct pipe_resource templ = (struct pipe_resource) {
      .usage = PIPE_USAGE_STAGING,
      .width0 = box->width + extra,
      .height0 = box->height,
      .depth0 = 1,
      .nr_samples = xfer->resource->nr_samples,
      .nr_storage_samples = xfer->resource->nr_storage_samples,
      .array_size = box->depth,
   };

   if (xfer->resource->target == PIPE_BUFFER)
      templ.target = PIPE_BUFFER;
   else if (templ.array_size > 1)
      templ.target = PIPE_TEXTURE_2D_ARRAY;
   else
      templ.target = PIPE_TEXTURE_2D;

   /* Depth, stencil, and ASTC can't be linear surfaces, so we can't use
    * xfer->resource->format directly.  Pick a bpb compatible format so
    * resource creation will succeed; blorp_copy will override it anyway.
    */
   switch (util_format_get_blocksizebits(res->internal_format)) {
   case 8:   templ.format = PIPE_FORMAT_R8_UINT;           break;
   case 16:  templ.format = PIPE_FORMAT_R8G8_UINT;         break;
   case 24:  templ.format = PIPE_FORMAT_R8G8B8_UINT;       break;
   case 32:  templ.format = PIPE_FORMAT_R8G8B8A8_UINT;     break;
   case 48:  templ.format = PIPE_FORMAT_R16G16B16_UINT;    break;
   case 64:  templ.format = PIPE_FORMAT_R16G16B16A16_UINT; break;
   case 96:  templ.format = PIPE_FORMAT_R32G32B32_UINT;    break;
   case 128: templ.format = PIPE_FORMAT_R32G32B32A32_UINT; break;
   default: unreachable("Invalid bpb");
   }

   map->staging = iris_resource_create(pscreen, &templ);
   assert(map->staging);

   if (templ.target != PIPE_BUFFER) {
      struct isl_surf *surf = &((struct iris_resource *) map->staging)->surf;
      xfer->stride = isl_surf_get_row_pitch_B(surf);
      xfer->layer_stride = isl_surf_get_array_pitch(surf);
   }

   if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
      iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
                       xfer->resource, xfer->level, box);
      /* Ensure writes to the staging BO land before we map it below. */
      iris_emit_pipe_control_flush(map->batch,
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
   }

   struct iris_bo *staging_bo = iris_resource_bo(map->staging);

   if (iris_batch_references(map->batch, staging_bo))
      iris_batch_flush(map->batch);

   map->ptr =
      iris_bo_map(map->dbg, staging_bo, xfer->usage & MAP_FLAGS) + extra;

   map->unmap = iris_unmap_copy_region;
}
static void
get_image_offset_el(const struct isl_surf *surf, unsigned level, unsigned z,
                    unsigned *out_x0_el, unsigned *out_y0_el)
{
   if (surf->dim == ISL_SURF_DIM_3D) {
      isl_surf_get_image_offset_el(surf, level, 0, z, out_x0_el, out_y0_el);
   } else {
      isl_surf_get_image_offset_el(surf, level, z, 0, out_x0_el, out_y0_el);
   }
}
/**
 * Get pointer offset into stencil buffer.
 *
 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
 * must decode the tile's layout in software.
 *
 * See
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
 *     Format.
 *   - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
 *
 * Even though the returned offset is always positive, the return type is
 * signed due to
 *    commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
 *    mesa: Fix return type of _mesa_get_format_bytes() (#37351)
 */
static intptr_t
s8_offset(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
{
   uint32_t tile_size = 4096;
   uint32_t tile_width = 64;
   uint32_t tile_height = 64;
   uint32_t row_size = 64 * stride / 2; /* Two rows are interleaved. */

   uint32_t tile_x = x / tile_width;
   uint32_t tile_y = y / tile_height;

   /* The byte's address relative to the tile's base address. */
   uint32_t byte_x = x % tile_width;
   uint32_t byte_y = y % tile_height;

   uintptr_t u = tile_y * row_size
               + tile_x * tile_size
               + 512 * (byte_x / 8)
               +  64 * (byte_y / 8)
               +  32 * ((byte_y / 4) % 2)
               +  16 * ((byte_x / 4) % 2)
               +   8 * ((byte_y / 2) % 2)
               +   4 * ((byte_x / 2) % 2)
               +   2 * (byte_y % 2)
               +   1 * (byte_x % 2);

   if (swizzled) {
      /* adjust for bit6 swizzling */
      if (((byte_x / 8) % 2) == 1) {
         if (((byte_y / 8) % 2) == 0) {
            u += 64;
         } else {
            u -= 64;
         }
      }
   }

   return u;
}
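/* Each W tile covers a 64x64-byte block (4096 bytes): tile_x/tile_y select
 * the tile within the surface, and the remaining terms interleave the low
 * bits of x and y to form the byte address inside the tile, which is why
 * the (de)tiling must be done with CPU math rather than through a fence.
 */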
static void
iris_unmap_s8(struct iris_transfer *map)
{
   struct pipe_transfer *xfer = &map->base;
   const struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;
   struct isl_surf *surf = &res->surf;
   const bool has_swizzling = false;

   if (xfer->usage & PIPE_TRANSFER_WRITE) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map =
         iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);

      for (int s = 0; s < box->depth; s++) {
         unsigned x0_el, y0_el;
         get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);

         for (uint32_t y = 0; y < box->height; y++) {
            for (uint32_t x = 0; x < box->width; x++) {
               ptrdiff_t offset = s8_offset(surf->row_pitch_B,
                                            x0_el + box->x + x,
                                            y0_el + box->y + y,
                                            has_swizzling);
               tiled_s8_map[offset] =
                  untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x];
            }
         }
      }
   }

   free(map->buffer);
}
static void
iris_map_s8(struct iris_transfer *map)
{
   struct pipe_transfer *xfer = &map->base;
   const struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;
   struct isl_surf *surf = &res->surf;

   xfer->stride = surf->row_pitch_B;
   xfer->layer_stride = xfer->stride * box->height;

   /* The tiling and detiling functions require that the linear buffer has
    * a 16-byte alignment (that is, its `x0` is 16-byte aligned).  Here we
    * over-allocate the linear buffer to get the proper alignment.
    */
   map->buffer = map->ptr = malloc(xfer->layer_stride * box->depth);
   assert(map->buffer);

   const bool has_swizzling = false;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map =
         iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);

      for (int s = 0; s < box->depth; s++) {
         unsigned x0_el, y0_el;
         get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);

         for (uint32_t y = 0; y < box->height; y++) {
            for (uint32_t x = 0; x < box->width; x++) {
               ptrdiff_t offset = s8_offset(surf->row_pitch_B,
                                            x0_el + box->x + x,
                                            y0_el + box->y + y,
                                            has_swizzling);
               untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x] =
                  tiled_s8_map[offset];
            }
         }
      }
   }

   map->unmap = iris_unmap_s8;
}
/* Compute extent parameters for use with tiled_memcpy functions.
 * xs are in units of bytes and ys are in units of strides.
 */
static inline void
tile_extents(const struct isl_surf *surf,
             const struct pipe_box *box,
             unsigned level, int z,
             unsigned *x1_B, unsigned *x2_B,
             unsigned *y1_el, unsigned *y2_el)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
   const unsigned cpp = fmtl->bpb / 8;

   assert(box->x % fmtl->bw == 0);
   assert(box->y % fmtl->bh == 0);

   unsigned x0_el, y0_el;
   get_image_offset_el(surf, level, box->z + z, &x0_el, &y0_el);

   *x1_B = (box->x / fmtl->bw + x0_el) * cpp;
   *y1_el = box->y / fmtl->bh + y0_el;
   *x2_B = (DIV_ROUND_UP(box->x + box->width, fmtl->bw) + x0_el) * cpp;
   *y2_el = DIV_ROUND_UP(box->y + box->height, fmtl->bh) + y0_el;
}
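/* For example, with a non-compressed 32-bit format (bw == bh == 1, cpp == 4)
 * and a box with x == 10, width == 6 at image offset (0, 0), this yields
 * x1_B == 40 and x2_B == 64 (byte extents), while y1_el/y2_el stay in rows.
 */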
static void
iris_unmap_tiled_memcpy(struct iris_transfer *map)
{
   struct pipe_transfer *xfer = &map->base;
   const struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;
   struct isl_surf *surf = &res->surf;

   const bool has_swizzling = false;

   if (xfer->usage & PIPE_TRANSFER_WRITE) {
      char *dst =
         iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);

      for (int s = 0; s < box->depth; s++) {
         unsigned x1, x2, y1, y2;
         tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);

         void *ptr = map->ptr + s * xfer->layer_stride;

         isl_memcpy_linear_to_tiled(x1, x2, y1, y2, dst, ptr,
                                    surf->row_pitch_B, xfer->stride,
                                    has_swizzling, surf->tiling, ISL_MEMCPY);
      }
   }
   os_free_aligned(map->buffer);
   map->buffer = map->ptr = NULL;
}
static void
iris_map_tiled_memcpy(struct iris_transfer *map)
{
   struct pipe_transfer *xfer = &map->base;
   const struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;
   struct isl_surf *surf = &res->surf;

   xfer->stride = ALIGN(surf->row_pitch_B, 16);
   xfer->layer_stride = xfer->stride * box->height;

   unsigned x1, x2, y1, y2;
   tile_extents(surf, box, xfer->level, 0, &x1, &x2, &y1, &y2);

   /* The tiling and detiling functions require that the linear buffer has
    * a 16-byte alignment (that is, its `x0` is 16-byte aligned).  Here we
    * over-allocate the linear buffer to get the proper alignment.
    */
   map->buffer =
      os_malloc_aligned(xfer->layer_stride * box->depth, 16);
   assert(map->buffer);
   map->ptr = (char *)map->buffer + (x1 & 0xf);

   const bool has_swizzling = false;

   // XXX: PIPE_TRANSFER_READ?
   if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
      char *src =
         iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);

      for (int s = 0; s < box->depth; s++) {
         unsigned x1, x2, y1, y2;
         tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);

         /* Use 's' rather than 'box->z' to rebase the first slice to 0. */
         void *ptr = map->ptr + s * xfer->layer_stride;

         isl_memcpy_tiled_to_linear(x1, x2, y1, y2, ptr, src, xfer->stride,
                                    surf->row_pitch_B, has_swizzling,
                                    surf->tiling, ISL_MEMCPY_STREAMING_LOAD);
      }
   }

   map->unmap = iris_unmap_tiled_memcpy;
}
static void
iris_map_direct(struct iris_transfer *map)
{
   struct pipe_transfer *xfer = &map->base;
   struct pipe_box *box = &xfer->box;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;

   void *ptr = iris_bo_map(map->dbg, res->bo, xfer->usage & MAP_FLAGS);

   if (res->base.target == PIPE_BUFFER) {
      xfer->stride = 0;
      xfer->layer_stride = 0;

      map->ptr = ptr + box->x;
   } else {
      struct isl_surf *surf = &res->surf;
      const struct isl_format_layout *fmtl =
         isl_format_get_layout(surf->format);
      const unsigned cpp = fmtl->bpb / 8;
      unsigned x0_el, y0_el;

      get_image_offset_el(surf, xfer->level, box->z, &x0_el, &y0_el);

      xfer->stride = isl_surf_get_row_pitch_B(surf);
      xfer->layer_stride = isl_surf_get_array_pitch(surf);

      map->ptr = ptr + (y0_el + box->y) * xfer->stride + (x0_el + box->x) * cpp;
   }
}
static bool
can_promote_to_async(const struct iris_resource *res,
                     const struct pipe_box *box,
                     enum pipe_transfer_usage usage)
{
   /* If we're writing to a section of the buffer that hasn't even been
    * initialized with useful data, then we can safely promote this write
    * to be unsynchronized.  This helps the common pattern of appending data.
    */
   return res->base.target == PIPE_BUFFER && (usage & PIPE_TRANSFER_WRITE) &&
          !(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
          !util_ranges_intersect(&res->valid_buffer_range, box->x,
                                 box->x + box->width);
}
static void *
iris_transfer_map(struct pipe_context *ctx,
                  struct pipe_resource *resource,
                  unsigned level,
                  enum pipe_transfer_usage usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **ptransfer)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_resource *res = (struct iris_resource *)resource;
   struct isl_surf *surf = &res->surf;

   if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
      /* Replace the backing storage with a fresh buffer for non-async maps */
      if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                     TC_TRANSFER_MAP_NO_INVALIDATE)))
         iris_invalidate_resource(ctx, resource);

      /* If we can discard the whole resource, we can discard the range. */
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   bool map_would_stall = false;

   if (resource->target != PIPE_BUFFER) {
      iris_resource_access_raw(ice, &ice->batches[IRIS_BATCH_RENDER], res,
                               level, box->z, box->depth,
                               usage & PIPE_TRANSFER_WRITE);
   }

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       can_promote_to_async(res, box, usage)) {
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
   }

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      map_would_stall = resource_is_busy(ice, res);

      if (map_would_stall && (usage & PIPE_TRANSFER_DONTBLOCK) &&
                             (usage & PIPE_TRANSFER_MAP_DIRECTLY))
         return NULL;
   }

   if (surf->tiling != ISL_TILING_LINEAR &&
       (usage & PIPE_TRANSFER_MAP_DIRECTLY))
      return NULL;

   struct iris_transfer *map = slab_alloc(&ice->transfer_pool);
   struct pipe_transfer *xfer = &map->base;

   if (!map)
      return NULL;

   memset(map, 0, sizeof(*map));
   map->dbg = &ice->dbg;

   pipe_resource_reference(&xfer->resource, resource);
   xfer->level = level;
   xfer->usage = usage;
   xfer->box = *box;
   *ptransfer = xfer;

   if (usage & PIPE_TRANSFER_WRITE)
      util_range_add(&res->valid_buffer_range, box->x, box->x + box->width);

   /* Avoid using GPU copies for persistent/coherent buffers, as the idea
    * there is to access them simultaneously on the CPU & GPU.  This also
    * avoids trying to use GPU copies for our u_upload_mgr buffers which
    * contain state we're constructing for a GPU draw call, which would
    * kill us with infinite stack recursion.
    */
   bool no_gpu = usage & (PIPE_TRANSFER_PERSISTENT |
                          PIPE_TRANSFER_COHERENT |
                          PIPE_TRANSFER_MAP_DIRECTLY);

   /* GPU copies are not useful for buffer reads.  Instead of stalling to
    * read from the original buffer, we'd simply copy it to a temporary...
    * then stall (a bit longer) to read from that buffer.
    *
    * Images are less clear-cut.  Color resolves are destructive, removing
    * the underlying compression, so we'd rather blit the data to a linear
    * temporary and map that, to avoid the resolve.  (It might be better to
    * use a tiled temporary and the tiled_memcpy paths...)
    */
   if (!(usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       res->aux.usage != ISL_AUX_USAGE_CCS_E &&
       res->aux.usage != ISL_AUX_USAGE_CCS_D) {
      no_gpu = true;
   }

   if ((map_would_stall || res->aux.usage == ISL_AUX_USAGE_CCS_E) && !no_gpu) {
      /* If we need a synchronous mapping and the resource is busy,
       * we copy to/from a linear temporary buffer using the GPU.
       */
      map->batch = &ice->batches[IRIS_BATCH_RENDER];
      map->blorp = &ice->blorp;
      iris_map_copy_region(map);
   } else {
      /* Otherwise we're free to map on the CPU.  Flush if needed. */
      if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
            if (iris_batch_references(&ice->batches[i], res->bo))
               iris_batch_flush(&ice->batches[i]);
         }
      }

      if (surf->tiling == ISL_TILING_W) {
         /* TODO: Teach iris_map_tiled_memcpy about W-tiling... */
         iris_map_s8(map);
      } else if (surf->tiling != ISL_TILING_LINEAR) {
         iris_map_tiled_memcpy(map);
      } else {
         iris_map_direct(map);
      }
   }

   return map->ptr;
}
static void
iris_transfer_flush_region(struct pipe_context *ctx,
                           struct pipe_transfer *xfer,
                           const struct pipe_box *box)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_resource *res = (struct iris_resource *) xfer->resource;
   struct iris_transfer *map = (void *) xfer;

   if (map->staging)
      iris_flush_staging_region(xfer, box);

   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      if (ice->batches[i].contains_draw ||
          ice->batches[i].cache.render->entries) {
         iris_batch_maybe_flush(&ice->batches[i], 24);
         iris_flush_and_dirty_for_history(ice, &ice->batches[i], res);
      }
   }

   /* Make sure we flag constants dirty even if there's no need to emit
    * any PIPE_CONTROLs to a batch.
    */
   iris_dirty_for_history(ice, res);
}
static void
iris_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *xfer)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_transfer *map = (void *) xfer;

   if (!(xfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
      struct pipe_box flush_box = {
         .x = 0, .y = 0, .z = 0,
         .width = xfer->box.width,
         .height = xfer->box.height,
         .depth = xfer->box.depth,
      };
      iris_transfer_flush_region(ctx, xfer, &flush_box);
   }

   if (map->unmap)
      map->unmap(map);

   pipe_resource_reference(&xfer->resource, NULL);
   slab_free(&ice->transfer_pool, map);
}
/**
 * Mark state dirty that needs to be re-emitted when a resource is written.
 */
void
iris_dirty_for_history(struct iris_context *ice,
                       struct iris_resource *res)
{
   uint64_t dirty = 0ull;

   if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
      dirty |= IRIS_DIRTY_CONSTANTS_VS |
               IRIS_DIRTY_CONSTANTS_TCS |
               IRIS_DIRTY_CONSTANTS_TES |
               IRIS_DIRTY_CONSTANTS_GS |
               IRIS_DIRTY_CONSTANTS_FS |
               IRIS_DIRTY_CONSTANTS_CS |
               IRIS_ALL_DIRTY_BINDINGS;
   }

   ice->state.dirty |= dirty;
}
/**
 * Produce a set of PIPE_CONTROL bits which ensure data written to a
 * resource becomes visible, and any stale read cache data is invalidated.
 */
uint32_t
iris_flush_bits_for_history(struct iris_resource *res)
{
   uint32_t flush = PIPE_CONTROL_CS_STALL;

   if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
      flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE |
               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
   }

   if (res->bind_history & PIPE_BIND_SAMPLER_VIEW)
      flush |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (res->bind_history & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
      flush |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (res->bind_history & (PIPE_BIND_SHADER_BUFFER | PIPE_BIND_SHADER_IMAGE))
      flush |= PIPE_CONTROL_DATA_CACHE_FLUSH;

   return flush;
}
void
iris_flush_and_dirty_for_history(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res)
{
   if (res->base.target != PIPE_BUFFER)
      return;

   uint32_t flush = iris_flush_bits_for_history(res);

   /* We've likely used the rendering engine (i.e. BLORP) to write to this
    * surface.  Flush the render cache so the data actually lands.
    */
   if (batch->name != IRIS_BATCH_COMPUTE)
      flush |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   iris_emit_pipe_control_flush(batch, flush);

   iris_dirty_for_history(ice, res);
}
void
iris_resource_set_clear_color(struct iris_context *ice,
                              struct iris_resource *res,
                              union isl_color_value color)
{
   if (memcmp(&res->aux.clear_color, &color, sizeof(color)) != 0) {
      res->aux.clear_color = color;
   }
}
union isl_color_value
iris_resource_get_clear_color(const struct iris_resource *res,
                              struct iris_bo **clear_color_bo,
                              uint64_t *clear_color_offset)
{
   assert(res->aux.bo);

   if (clear_color_bo)
      *clear_color_bo = res->aux.clear_color_bo;
   if (clear_color_offset)
      *clear_color_offset = res->aux.clear_color_offset;
   return res->aux.clear_color;
}
static enum pipe_format
iris_resource_get_internal_format(struct pipe_resource *p_res)
{
   struct iris_resource *res = (void *) p_res;
   return res->internal_format;
}
static const struct u_transfer_vtbl transfer_vtbl = {
   .resource_create       = iris_resource_create,
   .resource_destroy      = iris_resource_destroy,
   .transfer_map          = iris_transfer_map,
   .transfer_unmap        = iris_transfer_unmap,
   .transfer_flush_region = iris_transfer_flush_region,
   .get_internal_format   = iris_resource_get_internal_format,
   .set_stencil           = iris_resource_set_separate_stencil,
   .get_stencil           = iris_resource_get_separate_stencil,
};
void
iris_init_screen_resource_functions(struct pipe_screen *pscreen)
{
   pscreen->query_dmabuf_modifiers = iris_query_dmabuf_modifiers;
   pscreen->resource_create_with_modifiers =
      iris_resource_create_with_modifiers;
   pscreen->resource_create = u_transfer_helper_resource_create;
   pscreen->resource_from_user_memory = iris_resource_from_user_memory;
   pscreen->resource_from_handle = iris_resource_from_handle;
   pscreen->resource_get_handle = iris_resource_get_handle;
   pscreen->resource_destroy = u_transfer_helper_resource_destroy;
   pscreen->transfer_helper =
      u_transfer_helper_create(&transfer_vtbl, true, true, false, true);
}
void
iris_init_resource_functions(struct pipe_context *ctx)
{
   ctx->flush_resource = iris_flush_resource;
   ctx->invalidate_resource = iris_invalidate_resource;
   ctx->transfer_map = u_transfer_helper_transfer_map;
   ctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
   ctx->transfer_unmap = u_transfer_helper_transfer_unmap;
   ctx->buffer_subdata = u_default_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}