/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/context.h"
#include "main/teximage.h"
#include "main/blend.h"
#include "main/bufferobj.h"
#include "main/enums.h"
#include "main/fbobject.h"
#include "main/image.h"
#include "main/renderbuffer.h"
#include "main/glformats.h"

#include "brw_blorp.h"
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_meta_util.h"
#include "brw_state.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "common/gen_debug.h"

#define FILE_DEBUG_FLAG DEBUG_BLORP
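
/* blorp asks the driver for two cache callbacks so that compiled blit and
 * clear shaders can be stored in and fetched from brw's program cache
 * (BRW_CACHE_BLORP_PROG) rather than recompiled for every operation.
 */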
static bool
brw_blorp_lookup_shader(struct blorp_context *blorp,
                        const void *key, uint32_t key_size,
                        uint32_t *kernel_out, void *prog_data_out)
{
   struct brw_context *brw = blorp->driver_ctx;
   return brw_search_cache(&brw->cache, BRW_CACHE_BLORP_PROG,
                           key, key_size, kernel_out, prog_data_out);
}
static void
brw_blorp_upload_shader(struct blorp_context *blorp,
                        const void *key, uint32_t key_size,
                        const void *kernel, uint32_t kernel_size,
                        const struct brw_stage_prog_data *prog_data,
                        uint32_t prog_data_size,
                        uint32_t *kernel_out, void *prog_data_out)
{
   struct brw_context *brw = blorp->driver_ctx;
   brw_upload_cache(&brw->cache, BRW_CACHE_BLORP_PROG, key, key_size,
                    kernel, kernel_size, prog_data, prog_data_size,
                    kernel_out, prog_data_out);
}
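
/* Hook blorp into this context: point it at the compiler and isl device and
 * pick the generation-specific batch emission entrypoint.
 */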
void
brw_blorp_init(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   blorp_init(&brw->blorp, brw, &brw->isl_dev);

   brw->blorp.compiler = brw->screen->compiler;

   switch (devinfo->gen) {
   case 4:
      if (devinfo->is_g4x) {
         brw->blorp.exec = gen45_blorp_exec;
      } else {
         brw->blorp.exec = gen4_blorp_exec;
      }
      break;
   case 5:
      brw->blorp.exec = gen5_blorp_exec;
      break;
   case 6:
      brw->blorp.exec = gen6_blorp_exec;
      break;
   case 7:
      if (devinfo->is_haswell) {
         brw->blorp.exec = gen75_blorp_exec;
      } else {
         brw->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      brw->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      brw->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      brw->blorp.exec = gen10_blorp_exec;
      break;
   case 11:
      brw->blorp.exec = gen11_blorp_exec;
      break;
   default:
      unreachable("Invalid gen");
   }

   brw->blorp.lookup_shader = brw_blorp_lookup_shader;
   brw->blorp.upload_shader = brw_blorp_upload_shader;
}
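
/* Translate an intel_mipmap_tree (plus its auxiliary buffer and clear color,
 * when present) into the blorp_surf that the blorp API consumes.
 */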
static void
blorp_surf_for_miptree(struct brw_context *brw,
                       struct blorp_surf *surf,
                       struct intel_mipmap_tree *mt,
                       enum isl_aux_usage aux_usage,
                       bool is_render_target,
                       unsigned *level,
                       unsigned start_layer, unsigned num_layers,
                       struct isl_surf tmp_surfs[1])
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY) {
      const unsigned num_samples = mt->surf.samples;
      for (unsigned i = 0; i < num_layers; i++) {
         for (unsigned s = 0; s < num_samples; s++) {
            const unsigned phys_layer = (start_layer + i) * num_samples + s;
            intel_miptree_check_level_layer(mt, *level, phys_layer);
         }
      }
   } else {
      for (unsigned i = 0; i < num_layers; i++)
         intel_miptree_check_level_layer(mt, *level, start_layer + i);
   }

   *surf = (struct blorp_surf) {
      .surf = &mt->surf,
      .addr = (struct blorp_address) {
         .buffer = mt->bo,
         .offset = mt->offset,
         .reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
         .mocs = brw_get_bo_mocs(devinfo, mt->bo),
      },
      .aux_usage = aux_usage,
   };

   if (mt->format == MESA_FORMAT_S_UINT8 && is_render_target &&
       devinfo->gen >= 8)
      mt->r8stencil_needs_update = true;

   if (surf->aux_usage == ISL_AUX_USAGE_HIZ &&
       !intel_miptree_level_has_hiz(mt, *level))
      surf->aux_usage = ISL_AUX_USAGE_NONE;

   if (surf->aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      surf->clear_color = mt->fast_clear_color;

      surf->aux_surf = &mt->aux_buf->surf;
      surf->aux_addr = (struct blorp_address) {
         .reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
         .mocs = surf->addr.mocs,
      };

      surf->aux_addr.buffer = mt->aux_buf->bo;
      surf->aux_addr.offset = mt->aux_buf->offset;

      if (devinfo->gen >= 10) {
         surf->clear_color_addr = (struct blorp_address) {
            .buffer = mt->aux_buf->clear_color_bo,
            .offset = mt->aux_buf->clear_color_offset,
         };
      }
   } else {
      surf->aux_addr = (struct blorp_address) {
         .buffer = NULL,
      };
      memset(&surf->clear_color, 0, sizeof(surf->clear_color));
   }
   assert((surf->aux_usage == ISL_AUX_USAGE_NONE) ==
          (surf->aux_addr.buffer == NULL));

   /* ISL wants real levels, not offset ones. */
   *level -= mt->first_level;
}
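
/* Pick the isl_format blorp should use for a given mesa_format.  Depth and
 * stencil formats are mapped to color formats with the same bit layout,
 * since blorp accesses them as ordinary color surfaces.
 */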
static enum isl_format
brw_blorp_to_isl_format(struct brw_context *brw, mesa_format format,
                        bool is_render_target)
{
   switch (format) {
   case MESA_FORMAT_NONE:
      return ISL_FORMAT_UNSUPPORTED;
   case MESA_FORMAT_S_UINT8:
      return ISL_FORMAT_R8_UINT;
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      return ISL_FORMAT_R24_UNORM_X8_TYPELESS;
   case MESA_FORMAT_Z_FLOAT32:
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
      return ISL_FORMAT_R32_FLOAT;
   case MESA_FORMAT_Z_UNORM16:
      return ISL_FORMAT_R16_UNORM;
   default:
      if (is_render_target) {
         assert(brw->mesa_format_supports_render[format]);
         return brw->mesa_to_isl_render_format[format];
      } else {
         return brw_isl_format_for_mesa_format(format);
      }
   }
}
/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *   and
 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static enum isl_channel_select
swizzle_to_scs(GLenum swizzle)
{
   return (enum isl_channel_select)((swizzle + 4) & 7);
}
/**
 * Note: if the src (or dst) is a 2D multisample array texture on Gen7+ using
 * INTEL_MSAA_LAYOUT_UMS or INTEL_MSAA_LAYOUT_CMS, src_layer (dst_layer) is
 * the physical layer holding sample 0.  So, for example, if
 * src_mt->surf.samples == 4, then logical layer n corresponds to src_layer ==
 * 4*n.
 */
void
brw_blorp_blit_miptrees(struct brw_context *brw,
                        struct intel_mipmap_tree *src_mt,
                        unsigned src_level, unsigned src_layer,
                        mesa_format src_format, int src_swizzle,
                        struct intel_mipmap_tree *dst_mt,
                        unsigned dst_level, unsigned dst_layer,
                        mesa_format dst_format,
                        float src_x0, float src_y0,
                        float src_x1, float src_y1,
                        float dst_x0, float dst_y0,
                        float dst_x1, float dst_y1,
                        GLenum filter, bool mirror_x, bool mirror_y,
                        bool decode_srgb, bool encode_srgb)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   DBG("%s from %dx %s mt %p %d %d (%f,%f) (%f,%f) "
       "to %dx %s mt %p %d %d (%f,%f) (%f,%f) (flip %d,%d)\n",
       __func__,
       src_mt->surf.samples, _mesa_get_format_name(src_mt->format), src_mt,
       src_level, src_layer, src_x0, src_y0, src_x1, src_y1,
       dst_mt->surf.samples, _mesa_get_format_name(dst_mt->format), dst_mt,
       dst_level, dst_layer, dst_x0, dst_y0, dst_x1, dst_y1,
       mirror_x, mirror_y);

   if (!decode_srgb && _mesa_get_format_color_encoding(src_format) == GL_SRGB)
      src_format = _mesa_get_srgb_format_linear(src_format);

   if (!encode_srgb && _mesa_get_format_color_encoding(dst_format) == GL_SRGB)
      dst_format = _mesa_get_srgb_format_linear(dst_format);

   /* When doing a multisample resolve of a GL_LUMINANCE32F or GL_INTENSITY32F
    * texture, the above code configures the source format for L32_FLOAT or
    * I32_FLOAT, and the destination format for R32_FLOAT.  On Sandy Bridge,
    * the SAMPLE message appears to handle multisampled L32_FLOAT and
    * I32_FLOAT textures incorrectly, resulting in blocky artifacts.  So work
    * around the problem by using a source format of R32_FLOAT.  This
    * shouldn't affect rendering correctness, since the destination format is
    * R32_FLOAT, so only the contents of the red channel matters.
    */
   if (devinfo->gen == 6 &&
       src_mt->surf.samples > 1 && dst_mt->surf.samples <= 1 &&
       src_mt->format == dst_mt->format &&
       (dst_format == MESA_FORMAT_L_FLOAT32 ||
        dst_format == MESA_FORMAT_I_FLOAT32)) {
      src_format = dst_format = MESA_FORMAT_R_FLOAT32;
   }

   enum isl_format src_isl_format =
      brw_blorp_to_isl_format(brw, src_format, false);
   enum isl_aux_usage src_aux_usage =
      intel_miptree_texture_aux_usage(brw, src_mt, src_isl_format);
   /* We do format workarounds for some depth formats so we can't reliably
    * sample with HiZ.  One of these days, we should fix that.
    */
   if (src_aux_usage == ISL_AUX_USAGE_HIZ)
      src_aux_usage = ISL_AUX_USAGE_NONE;
   const bool src_clear_supported =
      src_aux_usage != ISL_AUX_USAGE_NONE && src_mt->format == src_format;
   intel_miptree_prepare_access(brw, src_mt, src_level, 1, src_layer, 1,
                                src_aux_usage, src_clear_supported);

   enum isl_format dst_isl_format =
      brw_blorp_to_isl_format(brw, dst_format, true);
   enum isl_aux_usage dst_aux_usage =
      intel_miptree_render_aux_usage(brw, dst_mt, dst_isl_format,
                                     false, false);
   const bool dst_clear_supported = dst_aux_usage != ISL_AUX_USAGE_NONE;
   intel_miptree_prepare_access(brw, dst_mt, dst_level, 1, dst_layer, 1,
                                dst_aux_usage, dst_clear_supported);

   struct isl_surf tmp_surfs[2];
   struct blorp_surf src_surf, dst_surf;
   blorp_surf_for_miptree(brw, &src_surf, src_mt, src_aux_usage, false,
                          &src_level, src_layer, 1, &tmp_surfs[0]);
   blorp_surf_for_miptree(brw, &dst_surf, dst_mt, dst_aux_usage, true,
                          &dst_level, dst_layer, 1, &tmp_surfs[1]);

   struct isl_swizzle src_isl_swizzle = {
      .r = swizzle_to_scs(GET_SWZ(src_swizzle, 0)),
      .g = swizzle_to_scs(GET_SWZ(src_swizzle, 1)),
      .b = swizzle_to_scs(GET_SWZ(src_swizzle, 2)),
      .a = swizzle_to_scs(GET_SWZ(src_swizzle, 3)),
   };

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_blit(&batch, &src_surf, src_level, src_layer,
              src_isl_format, src_isl_swizzle,
              &dst_surf, dst_level, dst_layer,
              dst_isl_format, ISL_SWIZZLE_IDENTITY,
              src_x0, src_y0, src_x1, src_y1,
              dst_x0, dst_y0, dst_x1, dst_y1,
              filter, mirror_x, mirror_y);
   blorp_batch_finish(&batch);

   intel_miptree_finish_write(brw, dst_mt, dst_level, dst_layer, 1,
                              dst_aux_usage);
}
void
brw_blorp_copy_miptrees(struct brw_context *brw,
                        struct intel_mipmap_tree *src_mt,
                        unsigned src_level, unsigned src_layer,
                        struct intel_mipmap_tree *dst_mt,
                        unsigned dst_level, unsigned dst_layer,
                        unsigned src_x, unsigned src_y,
                        unsigned dst_x, unsigned dst_y,
                        unsigned src_width, unsigned src_height)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   DBG("%s from %dx %s mt %p %d %d (%d,%d) %dx%d "
       "to %dx %s mt %p %d %d (%d,%d)\n",
       __func__,
       src_mt->surf.samples, _mesa_get_format_name(src_mt->format), src_mt,
       src_level, src_layer, src_x, src_y, src_width, src_height,
       dst_mt->surf.samples, _mesa_get_format_name(dst_mt->format), dst_mt,
       dst_level, dst_layer, dst_x, dst_y);

   enum isl_aux_usage src_aux_usage, dst_aux_usage;
   bool src_clear_supported, dst_clear_supported;

   switch (src_mt->aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_E:
      src_aux_usage = src_mt->aux_usage;
      /* Prior to gen9, fast-clear only supported 0/1 clear colors.  Since
       * we're going to re-interpret the format as an integer format possibly
       * with a different number of components, we can't handle clear colors
       * until gen9.
       */
      src_clear_supported = devinfo->gen >= 9;
      break;
   default:
      src_aux_usage = ISL_AUX_USAGE_NONE;
      src_clear_supported = false;
      break;
   }

   switch (dst_mt->aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_E:
      dst_aux_usage = dst_mt->aux_usage;
      /* Prior to gen9, fast-clear only supported 0/1 clear colors.  Since
       * we're going to re-interpret the format as an integer format possibly
       * with a different number of components, we can't handle clear colors
       * until gen9.
       */
      dst_clear_supported = devinfo->gen >= 9;
      break;
   default:
      dst_aux_usage = ISL_AUX_USAGE_NONE;
      dst_clear_supported = false;
      break;
   }

   intel_miptree_prepare_access(brw, src_mt, src_level, 1, src_layer, 1,
                                src_aux_usage, src_clear_supported);
   intel_miptree_prepare_access(brw, dst_mt, dst_level, 1, dst_layer, 1,
                                dst_aux_usage, dst_clear_supported);

   struct isl_surf tmp_surfs[2];
   struct blorp_surf src_surf, dst_surf;
   blorp_surf_for_miptree(brw, &src_surf, src_mt, src_aux_usage, false,
                          &src_level, src_layer, 1, &tmp_surfs[0]);
   blorp_surf_for_miptree(brw, &dst_surf, dst_mt, dst_aux_usage, true,
                          &dst_level, dst_layer, 1, &tmp_surfs[1]);

   /* The hardware seems to have issues with having two different format
    * views of the same texture in the sampler cache at the same time.  It's
    * unclear exactly what the issue is but it hurts glCopyImageSubData
    * particularly badly because it does a lot of format reinterprets.  We
    * badly need better understanding of the issue and a better fix but this
    * works for now and fixes CTS tests.
    *
    * TODO: Remove this hack!
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL |
                                    PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_copy(&batch, &src_surf, src_level, src_layer,
              &dst_surf, dst_level, dst_layer,
              src_x, src_y, dst_x, dst_y, src_width, src_height);
   blorp_batch_finish(&batch);

   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL |
                                    PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);

   intel_miptree_finish_write(brw, dst_mt, dst_level, dst_layer, 1,
                              dst_aux_usage);
}
void
brw_blorp_copy_buffers(struct brw_context *brw,
                       struct brw_bo *src_bo,
                       unsigned src_offset,
                       struct brw_bo *dst_bo,
                       unsigned dst_offset,
                       unsigned size)
{
   DBG("%s %d bytes from %p[%d] to %p[%d]",
       __func__, size, src_bo, src_offset, dst_bo, dst_offset);

   struct blorp_batch batch;
   struct blorp_address src = { .buffer = src_bo, .offset = src_offset };
   struct blorp_address dst = { .buffer = dst_bo, .offset = dst_offset };

   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_buffer_copy(&batch, src, dst, size);
   blorp_batch_finish(&batch);
}
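
/* Pick the miptree that actually backs the given buffer bit; stencil data
 * may live in a separate stencil miptree.
 */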
static struct intel_mipmap_tree *
find_miptree(GLbitfield buffer_bit, struct intel_renderbuffer *irb)
{
   struct intel_mipmap_tree *mt = irb->mt;
   if (buffer_bit == GL_STENCIL_BUFFER_BIT && mt->stencil_mt)
      mt = mt->stencil_mt;
   return mt;
}
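
/* GL_RGB renderbuffers may be backed by RGBA surfaces, so force the alpha
 * channel to read as one when sampling them.
 */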
static int
blorp_get_texture_swizzle(const struct intel_renderbuffer *irb)
{
   return irb->Base.Base._BaseFormat == GL_RGB ?
      MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE) :
      SWIZZLE_XYZW;
}
static void
do_blorp_blit(struct brw_context *brw, GLbitfield buffer_bit,
              struct intel_renderbuffer *src_irb, mesa_format src_format,
              struct intel_renderbuffer *dst_irb, mesa_format dst_format,
              GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
              GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
              GLenum filter, bool mirror_x, bool mirror_y)
{
   const struct gl_context *ctx = &brw->ctx;

   /* Find source/dst miptrees */
   struct intel_mipmap_tree *src_mt = find_miptree(buffer_bit, src_irb);
   struct intel_mipmap_tree *dst_mt = find_miptree(buffer_bit, dst_irb);

   const bool do_srgb = ctx->Color.sRGBEnabled;

   /* Do the blit */
   brw_blorp_blit_miptrees(brw,
                           src_mt, src_irb->mt_level, src_irb->mt_layer,
                           src_format, blorp_get_texture_swizzle(src_irb),
                           dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
                           dst_format,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           filter, mirror_x, mirror_y,
                           do_srgb, do_srgb);

   dst_irb->need_downsample = true;
}
static bool
try_blorp_blit(struct brw_context *brw,
               const struct gl_framebuffer *read_fb,
               const struct gl_framebuffer *draw_fb,
               GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
               GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
               GLenum filter, GLbitfield buffer_bit)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   bool mirror_x, mirror_y;
   if (brw_meta_mirror_clip_and_scissor(ctx, read_fb, draw_fb,
                                        &srcX0, &srcY0, &srcX1, &srcY1,
                                        &dstX0, &dstY0, &dstX1, &dstY1,
                                        &mirror_x, &mirror_y))
      return true;

   /* Find buffers */
   struct intel_renderbuffer *src_irb;
   struct intel_renderbuffer *dst_irb;
   struct intel_mipmap_tree *src_mt;
   struct intel_mipmap_tree *dst_mt;
   switch (buffer_bit) {
   case GL_COLOR_BUFFER_BIT:
      src_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
      for (unsigned i = 0; i < draw_fb->_NumColorDrawBuffers; ++i) {
         dst_irb = intel_renderbuffer(draw_fb->_ColorDrawBuffers[i]);
         if (dst_irb)
            do_blorp_blit(brw, buffer_bit,
                          src_irb, src_irb->Base.Base.Format,
                          dst_irb, dst_irb->Base.Base.Format,
                          srcX0, srcY0, srcX1, srcY1,
                          dstX0, dstY0, dstX1, dstY1,
                          filter, mirror_x, mirror_y);
      }
      break;
   case GL_DEPTH_BUFFER_BIT:
      src_irb =
         intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      dst_irb =
         intel_renderbuffer(draw_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      src_mt = find_miptree(buffer_bit, src_irb);
      dst_mt = find_miptree(buffer_bit, dst_irb);

      /* We can't handle format conversions between Z24 and other formats
       * since we have to lie about the surface format.  See the comments in
       * brw_blorp_surface_info::set().
       */
      if ((src_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT) !=
          (dst_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT))
         return false;

      /* We also can't handle any combined depth-stencil formats because we
       * have to reinterpret as a color format.
       */
      if (_mesa_get_format_base_format(src_mt->format) == GL_DEPTH_STENCIL ||
          _mesa_get_format_base_format(dst_mt->format) == GL_DEPTH_STENCIL)
         return false;

      do_blorp_blit(brw, buffer_bit, src_irb, MESA_FORMAT_NONE,
                    dst_irb, MESA_FORMAT_NONE, srcX0, srcY0,
                    srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
                    filter, mirror_x, mirror_y);
      break;
   case GL_STENCIL_BUFFER_BIT:
      /* Blorp doesn't support combined depth stencil which is all we have
       * prior to gen6.
       */
      if (devinfo->gen < 6)
         return false;

      src_irb =
         intel_renderbuffer(read_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
      dst_irb =
         intel_renderbuffer(draw_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
      do_blorp_blit(brw, buffer_bit, src_irb, MESA_FORMAT_NONE,
                    dst_irb, MESA_FORMAT_NONE, srcX0, srcY0,
                    srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
                    filter, mirror_x, mirror_y);
      break;
   default:
      unreachable("not reached");
   }

   return true;
}
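
/* Mirror a [y0, y1) range about the given surface height.  Window-system
 * buffers have their origin at the lower left, so blits to or from them need
 * this flip.
 */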
static void
apply_y_flip(int *y0, int *y1, int height)
{
   int tmp = height - *y0;
   *y0 = height - *y1;
   *y1 = tmp;
}
bool
brw_blorp_copytexsubimage(struct brw_context *brw,
                          struct gl_renderbuffer *src_rb,
                          struct gl_texture_image *dst_image,
                          int slice,
                          int srcX0, int srcY0,
                          int dstX0, int dstY0,
                          int width, int height)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
   struct intel_texture_image *intel_image = intel_texture_image(dst_image);

   /* No pixel transfer operations (zoom, bias, mapping), just a blit */
   if (brw->ctx._ImageTransferState)
      return false;

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking at the src renderbuffer's miptree.
    */
   intel_prepare_render(brw);

   struct intel_mipmap_tree *src_mt = src_irb->mt;
   struct intel_mipmap_tree *dst_mt = intel_image->mt;

   /* There is support for only up to eight samples. */
   if (src_mt->surf.samples > 8 || dst_mt->surf.samples > 8)
      return false;

   if (_mesa_get_format_base_format(src_rb->Format) !=
       _mesa_get_format_base_format(dst_image->TexFormat)) {
      return false;
   }

   /* We can't handle format conversions between Z24 and other formats since
    * we have to lie about the surface format.  See the comments in
    * brw_blorp_surface_info::set().
    */
   if ((src_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT) !=
       (dst_mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT)) {
      return false;
   }

   /* We also can't handle any combined depth-stencil formats because we
    * have to reinterpret as a color format.
    */
   if (_mesa_get_format_base_format(src_mt->format) == GL_DEPTH_STENCIL ||
       _mesa_get_format_base_format(dst_mt->format) == GL_DEPTH_STENCIL)
      return false;

   if (!brw->mesa_format_supports_render[dst_image->TexFormat])
      return false;

   /* Source clipping shouldn't be necessary, since copytexsubimage (in
    * src/mesa/main/teximage.c) calls _mesa_clip_copytexsubimage() which
    * takes care of it.
    *
    * Destination clipping shouldn't be necessary since the restrictions on
    * glCopyTexSubImage prevent the user from specifying a destination rectangle
    * that falls outside the bounds of the destination texture.
    * See error_check_subtexture_dimensions().
    */

   int srcY1 = srcY0 + height;
   int srcX1 = srcX0 + width;
   int dstX1 = dstX0 + width;
   int dstY1 = dstY0 + height;

   /* Account for the fact that in the system framebuffer, the origin is at
    * the lower left corner.
    */
   bool mirror_y = _mesa_is_winsys_fbo(ctx->ReadBuffer);
   if (mirror_y)
      apply_y_flip(&srcY0, &srcY1, src_rb->Height);

   /* Account for face selection and texture view MinLayer */
   int dst_slice = slice + dst_image->TexObject->MinLayer + dst_image->Face;
   int dst_level = dst_image->Level + dst_image->TexObject->MinLevel;

   brw_blorp_blit_miptrees(brw,
                           src_mt, src_irb->mt_level, src_irb->mt_layer,
                           src_rb->Format, blorp_get_texture_swizzle(src_irb),
                           dst_mt, dst_level, dst_slice,
                           dst_image->TexFormat,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           GL_NEAREST, false, mirror_y,
                           false, false);

   /* If we're copying to a packed depth stencil texture and the source
    * framebuffer has separate stencil, we need to also copy the stencil data
    * over.
    */
   src_rb = ctx->ReadBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;
   if (_mesa_get_format_bits(dst_image->TexFormat, GL_STENCIL_BITS) > 0 &&
       src_rb != NULL) {
      src_irb = intel_renderbuffer(src_rb);
      src_mt = src_irb->mt;

      if (src_mt->stencil_mt)
         src_mt = src_mt->stencil_mt;
      if (dst_mt->stencil_mt)
         dst_mt = dst_mt->stencil_mt;

      if (src_mt != dst_mt) {
         brw_blorp_blit_miptrees(brw,
                                 src_mt, src_irb->mt_level, src_irb->mt_layer,
                                 src_mt->format,
                                 blorp_get_texture_swizzle(src_irb),
                                 dst_mt, dst_level, dst_slice,
                                 dst_mt->format,
                                 srcX0, srcY0, srcX1, srcY1,
                                 dstX0, dstY0, dstX1, dstY1,
                                 GL_NEAREST, false, mirror_y,
                                 false, false);
      }
   }

   return true;
}
GLbitfield
brw_blorp_framebuffer(struct brw_context *brw,
                      struct gl_framebuffer *readFb,
                      struct gl_framebuffer *drawFb,
                      GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                      GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                      GLbitfield mask, GLenum filter)
{
   static GLbitfield buffer_bits[] = {
      GL_COLOR_BUFFER_BIT,
      GL_DEPTH_BUFFER_BIT,
      GL_STENCIL_BUFFER_BIT,
   };

   for (unsigned int i = 0; i < ARRAY_SIZE(buffer_bits); ++i) {
      if ((mask & buffer_bits[i]) &&
          try_blorp_blit(brw, readFb, drawFb,
                         srcX0, srcY0, srcX1, srcY1,
                         dstX0, dstY0, dstX1, dstY1,
                         filter, buffer_bits[i])) {
         mask &= ~buffer_bits[i];
      }
   }

   return mask;
}
*
769 blorp_get_client_bo(struct brw_context
*brw
,
770 unsigned w
, unsigned h
, unsigned d
,
771 GLenum target
, GLenum format
, GLenum type
,
773 const struct gl_pixelstore_attrib
*packing
,
774 uint32_t *offset_out
, uint32_t *row_stride_out
,
775 uint32_t *image_stride_out
, bool read_only
)
777 /* Account for SKIP_PIXELS, SKIP_ROWS, ALIGNMENT, and SKIP_IMAGES */
778 const GLuint dims
= _mesa_get_texture_dimensions(target
);
779 const uint32_t first_pixel
= _mesa_image_offset(dims
, packing
, w
, h
,
780 format
, type
, 0, 0, 0);
781 const uint32_t last_pixel
= _mesa_image_offset(dims
, packing
, w
, h
,
784 const uint32_t stride
= _mesa_image_row_stride(packing
, w
, format
, type
);
785 const uint32_t cpp
= _mesa_bytes_per_pixel(format
, type
);
786 const uint32_t size
= last_pixel
- first_pixel
;
788 *row_stride_out
= stride
;
789 *image_stride_out
= _mesa_image_image_stride(packing
, w
, h
, format
, type
);
791 if (_mesa_is_bufferobj(packing
->BufferObj
)) {
792 const uint32_t offset
= first_pixel
+ (intptr_t)pixels
;
793 if (!read_only
&& ((offset
% cpp
) || (stride
% cpp
))) {
794 perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
798 /* This is a user-provided PBO. We just need to get the BO out */
799 struct intel_buffer_object
*intel_pbo
=
800 intel_buffer_object(packing
->BufferObj
);
802 intel_bufferobj_buffer(brw
, intel_pbo
, offset
, size
, !read_only
);
804 /* We take a reference to the BO so that the caller can just always
805 * unref without having to worry about whether it's a user PBO or one
808 brw_bo_reference(bo
);
810 *offset_out
= offset
;
813 /* Someone should have already checked that there is data to upload. */
816 /* Creating a temp buffer currently only works for upload */
819 /* This is not a user-provided PBO. Instead, pixels is a pointer to CPU
820 * data which we need to copy into a BO.
823 brw_bo_alloc(brw
->bufmgr
, "tmp_tex_subimage_src", size
);
825 perf_debug("intel_texsubimage: temp bo creation failed: size = %u\n",
830 if (brw_bo_subdata(bo
, 0, size
, pixels
+ first_pixel
)) {
831 perf_debug("intel_texsubimage: temp bo upload failed\n");
832 brw_bo_unreference(bo
);
/* Consider all the restrictions and determine the format of the source. */
static mesa_format
blorp_get_client_format(struct brw_context *brw,
                        GLenum format, GLenum type,
                        const struct gl_pixelstore_attrib *packing)
{
   if (brw->ctx._ImageTransferState)
      return MESA_FORMAT_NONE;

   if (packing->SwapBytes || packing->LsbFirst || packing->Invert) {
      perf_debug("intel_texsubimage_blorp: unsupported gl_pixelstore_attrib\n");
      return MESA_FORMAT_NONE;
   }

   if (format != GL_RED &&
       format != GL_RG &&
       format != GL_RGB &&
       format != GL_BGR &&
       format != GL_RGBA &&
       format != GL_BGRA &&
       format != GL_ALPHA &&
       format != GL_RED_INTEGER &&
       format != GL_RG_INTEGER &&
       format != GL_RGB_INTEGER &&
       format != GL_BGR_INTEGER &&
       format != GL_RGBA_INTEGER &&
       format != GL_BGRA_INTEGER) {
      perf_debug("intel_texsubimage_blorp: %s not supported",
                 _mesa_enum_to_string(format));
      return MESA_FORMAT_NONE;
   }

   return _mesa_tex_format_from_format_and_type(&brw->ctx, format, type);
}
static bool
need_signed_unsigned_int_conversion(mesa_format src_format,
                                    mesa_format dst_format)
{
   const GLenum src_type = _mesa_get_format_datatype(src_format);
   const GLenum dst_type = _mesa_get_format_datatype(dst_format);
   return (src_type == GL_INT && dst_type == GL_UNSIGNED_INT) ||
          (src_type == GL_UNSIGNED_INT && dst_type == GL_INT);
}
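
/* Upload client pixel data (CPU memory or a PBO) into a miptree.  The source
 * is wrapped in a temporary linear miptree and then copied or blitted into
 * the destination one slice at a time.
 */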
bool
brw_blorp_upload_miptree(struct brw_context *brw,
                         struct intel_mipmap_tree *dst_mt,
                         mesa_format dst_format,
                         uint32_t level, uint32_t x, uint32_t y, uint32_t z,
                         uint32_t width, uint32_t height, uint32_t depth,
                         GLenum target, GLenum format, GLenum type,
                         const void *pixels,
                         const struct gl_pixelstore_attrib *packing)
{
   const mesa_format src_format =
      blorp_get_client_format(brw, format, type, packing);
   if (src_format == MESA_FORMAT_NONE)
      return false;

   if (!brw->mesa_format_supports_render[dst_format]) {
      perf_debug("intel_texsubimage: can't use %s as render target\n",
                 _mesa_get_format_name(dst_format));
      return false;
   }

   /* This function relies on blorp_blit to upload the pixel data to the
    * miptree.  But, blorp_blit doesn't support signed to unsigned or
    * unsigned to signed integer conversions.
    */
   if (need_signed_unsigned_int_conversion(src_format, dst_format))
      return false;

   uint32_t src_offset, src_row_stride, src_image_stride;
   struct brw_bo *src_bo =
      blorp_get_client_bo(brw, width, height, depth,
                          target, format, type, pixels, packing,
                          &src_offset, &src_row_stride,
                          &src_image_stride, true);
   if (src_bo == NULL)
      return false;

   /* Now that source is offset to correct starting point, adjust the
    * given dimensions to treat 1D arrays as 2D.
    */
   if (target == GL_TEXTURE_1D_ARRAY) {
      assert(depth == 1);
      assert(z == 0);
      depth = height;
      height = 1;
      z = y;
      y = 0;
      src_image_stride = src_row_stride;
   }

   intel_miptree_check_level_layer(dst_mt, level, z + depth - 1);

   bool result = false;

   /* Blit slice-by-slice creating a single-slice miptree for each layer. Even
    * in case of linear buffers hardware wants image arrays to be aligned by
    * four rows. This way hardware only gets one image at a time and any
    * source alignment will do.
    */
   for (unsigned i = 0; i < depth; ++i) {
      struct intel_mipmap_tree *src_mt = intel_miptree_create_for_bo(
                                            brw, src_bo, src_format,
                                            src_offset + i * src_image_stride,
                                            width, height, 1,
                                            src_row_stride,
                                            ISL_TILING_LINEAR, 0);

      if (!src_mt) {
         perf_debug("intel_texsubimage: miptree creation for src failed\n");
         goto err;
      }

      /* In case exact match is needed, copy using equivalent UINT formats
       * preventing hardware from changing presentation for SNORM -1.
       */
      if (src_mt->format == dst_format) {
         brw_blorp_copy_miptrees(brw, src_mt, 0, 0,
                                 dst_mt, level, z + i,
                                 0, 0, x, y, width, height);
      } else {
         brw_blorp_blit_miptrees(brw, src_mt, 0, 0,
                                 src_format, SWIZZLE_XYZW,
                                 dst_mt, level, z + i,
                                 dst_format,
                                 0, 0, width, height,
                                 x, y, x + width, y + height,
                                 GL_NEAREST, false, false, false, false);
      }

      intel_miptree_release(&src_mt);
   }

   result = true;

err:
   brw_bo_unreference(src_bo);

   return result;
}
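
/* Read pixels out of a miptree into a client PBO; the inverse of
 * brw_blorp_upload_miptree.
 */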
bool
brw_blorp_download_miptree(struct brw_context *brw,
                           struct intel_mipmap_tree *src_mt,
                           mesa_format src_format, uint32_t src_swizzle,
                           uint32_t level, uint32_t x, uint32_t y, uint32_t z,
                           uint32_t width, uint32_t height, uint32_t depth,
                           GLenum target, GLenum format, GLenum type,
                           bool y_flip, const void *pixels,
                           const struct gl_pixelstore_attrib *packing)
{
   const mesa_format dst_format =
      blorp_get_client_format(brw, format, type, packing);
   if (dst_format == MESA_FORMAT_NONE)
      return false;

   if (!brw->mesa_format_supports_render[dst_format]) {
      perf_debug("intel_texsubimage: can't use %s as render target\n",
                 _mesa_get_format_name(dst_format));
      return false;
   }

   /* This function relies on blorp_blit to download the pixel data from the
    * miptree.  But, blorp_blit doesn't support signed to unsigned or unsigned
    * to signed integer conversions.
    */
   if (need_signed_unsigned_int_conversion(src_format, dst_format))
      return false;

   /* We can't fetch from LUMINANCE or intensity as that would require a
    * non-trivial swizzle.
    */
   switch (_mesa_get_format_base_format(src_format)) {
   case GL_LUMINANCE:
   case GL_LUMINANCE_ALPHA:
   case GL_INTENSITY:
      return false;
   default:
      break;
   }

   /* This pass only works for PBOs */
   assert(_mesa_is_bufferobj(packing->BufferObj));

   uint32_t dst_offset, dst_row_stride, dst_image_stride;
   struct brw_bo *dst_bo =
      blorp_get_client_bo(brw, width, height, depth,
                          target, format, type, pixels, packing,
                          &dst_offset, &dst_row_stride,
                          &dst_image_stride, false);
   if (dst_bo == NULL)
      return false;

   /* Now that source is offset to correct starting point, adjust the
    * given dimensions to treat 1D arrays as 2D.
    */
   if (target == GL_TEXTURE_1D_ARRAY) {
      assert(depth == 1);
      assert(z == 0);
      depth = height;
      height = 1;
      z = y;
      y = 0;
      dst_image_stride = dst_row_stride;
   }

   intel_miptree_check_level_layer(src_mt, level, z + depth - 1);

   int y0 = y;
   int y1 = y + height;
   if (y_flip) {
      apply_y_flip(&y0, &y1, minify(src_mt->surf.phys_level0_sa.height,
                                    level - src_mt->first_level));
   }

   bool result = false;

   /* Blit slice-by-slice creating a single-slice miptree for each layer. Even
    * in case of linear buffers hardware wants image arrays to be aligned by
    * four rows. This way hardware only gets one image at a time and any
    * source alignment will do.
    */
   for (unsigned i = 0; i < depth; ++i) {
      struct intel_mipmap_tree *dst_mt = intel_miptree_create_for_bo(
                                            brw, dst_bo, dst_format,
                                            dst_offset + i * dst_image_stride,
                                            width, height, 1,
                                            dst_row_stride,
                                            ISL_TILING_LINEAR, 0);

      if (!dst_mt) {
         perf_debug("intel_texsubimage: miptree creation for src failed\n");
         goto err;
      }

      /* In case exact match is needed, copy using equivalent UINT formats
       * preventing hardware from changing presentation for SNORM -1.
       */
      if (dst_mt->format == src_format && !y_flip &&
          src_swizzle == SWIZZLE_XYZW) {
         brw_blorp_copy_miptrees(brw, src_mt, level, z + i,
                                 dst_mt, 0, 0,
                                 x, y, 0, 0, width, height);
      } else {
         brw_blorp_blit_miptrees(brw, src_mt, level, z + i,
                                 src_format, src_swizzle,
                                 dst_mt, 0, 0, dst_format,
                                 x, y0, x + width, y1,
                                 0, 0, width, height,
                                 GL_NEAREST, false, y_flip, false, false);
      }

      intel_miptree_release(&dst_mt);
   }

   result = true;

   /* We implement PBO transfers by binding the user-provided BO as a
    * fake framebuffer and rendering to it.  This breaks the invariant of the
    * GL that nothing is able to render to a BO, causing nondeterministic
    * corruption issues because the render cache is not coherent with a
    * number of other caches that the BO could potentially be bound to
    * afterwards.
    *
    * This could be solved in the same way that we guarantee texture
    * coherency after a texture is attached to a framebuffer and
    * rendered to, but that would involve checking *all* BOs bound to
    * the pipeline for the case we need to emit a cache flush due to
    * previous rendering to any of them -- Including vertex, index,
    * uniform, atomic counter, shader image, transform feedback,
    * indirect draw buffers, etc.
    *
    * That would increase the per-draw call overhead even though it's
    * very unlikely that any of the BOs bound to the pipeline has been
    * rendered to via a PBO at any point, so it seems better to just
    * flush here unconditionally.
    */
   brw_emit_mi_flush(brw);

err:
   brw_bo_unreference(dst_bo);

   return result;
}
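
/* Translate the GL color mask for this draw buffer into blorp's per-channel
 * write-disable flags.  Returns true if any channel is masked off.
 */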
static bool
set_write_disables(const struct intel_renderbuffer *irb,
                   const unsigned color_mask, bool *color_write_disable)
{
   /* Format information in the renderbuffer represents the requirements
    * given by the client. There are cases where the backing miptree uses,
    * for example, RGBA to represent RGBX. Since the client is only expecting
    * RGB we can treat alpha as not used and write whatever we like into it.
    */
   const GLenum base_format = irb->Base.Base._BaseFormat;
   const int components = _mesa_base_format_component_count(base_format);
   bool disables = false;

   assert(components > 0);

   for (int i = 0; i < components; i++) {
      color_write_disable[i] = !(color_mask & (1 << i));
      disables = disables || color_write_disable[i];
   }

   return disables;
}
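
/* Clear one color renderbuffer, using a fast (CCS/MCS) clear when the
 * parameters allow it and falling back to a regular blorp clear otherwise.
 */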
static void
do_single_blorp_clear(struct brw_context *brw, struct gl_framebuffer *fb,
                      struct gl_renderbuffer *rb, unsigned buf,
                      bool partial_clear, bool encode_srgb)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   uint32_t x0, x1, y0, y1;

   mesa_format format = irb->Base.Base.Format;
   if (!encode_srgb && _mesa_get_format_color_encoding(format) == GL_SRGB)
      format = _mesa_get_srgb_format_linear(format);
   enum isl_format isl_format = brw->mesa_to_isl_render_format[format];

   x0 = fb->_Xmin;
   x1 = fb->_Xmax;
   if (rb->Name != 0) {
      y0 = fb->_Ymin;
      y1 = fb->_Ymax;
   } else {
      y0 = rb->Height - fb->_Ymax;
      y1 = rb->Height - fb->_Ymin;
   }

   /* If the clear region is empty, just return. */
   if (x0 == x1 || y0 == y1)
      return;

   bool can_fast_clear = !partial_clear;

   bool color_write_disable[4] = { false, false, false, false };
   if (set_write_disables(irb, GET_COLORMASK(ctx->Color.ColorMask, buf),
                          color_write_disable))
      can_fast_clear = false;

   /* We store clear colors as floats or uints as needed.  If there are
    * texture views in play, the formats will not properly be respected
    * during resolves because the resolve operations only know about the
    * miptree and not the renderbuffer.
    */
   if (irb->Base.Base.Format != irb->mt->format)
      can_fast_clear = false;

   if (!irb->mt->supports_fast_clear ||
       !brw_is_color_fast_clear_compatible(brw, irb->mt, &ctx->Color.ClearColor))
      can_fast_clear = false;

   /* Surface state can only record one fast clear color value. Therefore
    * unless different levels/layers agree on the color it can be used to
    * represent only single level/layer. Here it will be reserved for the
    * first slice (level 0, layer 0).
    */
   if (irb->layer_count > 1 || irb->mt_level || irb->mt_layer)
      can_fast_clear = false;

   unsigned level = irb->mt_level;
   const unsigned num_layers = fb->MaxNumLayers ? irb->layer_count : 1;

   /* If the MCS buffer hasn't been allocated yet, we need to allocate it now.
    */
   if (can_fast_clear && !irb->mt->aux_buf) {
      assert(irb->mt->aux_usage == ISL_AUX_USAGE_CCS_D);
      if (!intel_miptree_alloc_ccs(brw, irb->mt)) {
         /* There are a few reasons in addition to out-of-memory, that can
          * cause intel_miptree_alloc_non_msrt_mcs to fail.  Try to recover by
          * falling back to non-fast clear.
          */
         can_fast_clear = false;
      }
   }

   /* FINISHME: Debug and enable fast clears */
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   if (devinfo->gen >= 11)
      can_fast_clear = false;

   if (can_fast_clear) {
      const enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(irb->mt, irb->mt_level, irb->mt_layer);

      bool same_clear_color =
         !intel_miptree_set_clear_color(brw, irb->mt, &ctx->Color.ClearColor);

      /* If the buffer is already in INTEL_FAST_CLEAR_STATE_CLEAR, the clear
       * is redundant and can be skipped.
       */
      if (aux_state == ISL_AUX_STATE_CLEAR && same_clear_color)
         return;

      DBG("%s (fast) to mt %p level %d layers %d+%d\n", __FUNCTION__,
          irb->mt, irb->mt_level, irb->mt_layer, num_layers);

      /* We can't setup the blorp_surf until we've allocated the MCS above */
      struct isl_surf isl_tmp[2];
      struct blorp_surf surf;
      blorp_surf_for_miptree(brw, &surf, irb->mt, irb->mt->aux_usage, true,
                             &level, irb->mt_layer, num_layers, isl_tmp);

      /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
       *
       *    "Any transition from any value in {Clear, Render, Resolve} to a
       *    different value in {Clear, Render, Resolve} requires end of pipe
       *    synchronization."
       *
       * In other words, fast clear ops are not properly synchronized with
       * other drawing.  We need to use a PIPE_CONTROL to ensure that the
       * contents of the previous draw hit the render target before we resolve
       * and again afterwards to ensure that the resolve is complete before we
       * do any more regular drawing.
       */
      brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);

      struct blorp_batch batch;
      blorp_batch_init(&brw->blorp, &batch, brw, 0);
      blorp_fast_clear(&batch, &surf, isl_format,
                       level, irb->mt_layer, num_layers,
                       x0, y0, x1, y1);
      blorp_batch_finish(&batch);

      brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);

      /* Now that the fast clear has occurred, put the buffer in
       * INTEL_FAST_CLEAR_STATE_CLEAR so that we won't waste time doing
       * redundant clears.
       */
      intel_miptree_set_aux_state(brw, irb->mt, irb->mt_level,
                                  irb->mt_layer, num_layers,
                                  ISL_AUX_STATE_CLEAR);
   } else {
      DBG("%s (slow) to mt %p level %d layer %d+%d\n", __FUNCTION__,
          irb->mt, irb->mt_level, irb->mt_layer, num_layers);

      enum isl_aux_usage aux_usage =
         intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
                                        false, false);
      intel_miptree_prepare_render(brw, irb->mt, level, irb->mt_layer,
                                   num_layers, aux_usage);

      struct isl_surf isl_tmp[2];
      struct blorp_surf surf;
      blorp_surf_for_miptree(brw, &surf, irb->mt, aux_usage, true,
                             &level, irb->mt_layer, num_layers, isl_tmp);

      union isl_color_value clear_color;
      memcpy(clear_color.f32, ctx->Color.ClearColor.f, sizeof(float) * 4);

      struct blorp_batch batch;
      blorp_batch_init(&brw->blorp, &batch, brw, 0);
      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  level, irb->mt_layer, num_layers,
                  x0, y0, x1, y1,
                  clear_color, color_write_disable);
      blorp_batch_finish(&batch);

      intel_miptree_finish_render(brw, irb->mt, level, irb->mt_layer,
                                  num_layers, aux_usage);
   }

   return;
}
void
brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
                      GLbitfield mask, bool partial_clear, bool encode_srgb)
{
   for (unsigned buf = 0; buf < fb->_NumColorDrawBuffers; buf++) {
      struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[buf];
      struct intel_renderbuffer *irb = intel_renderbuffer(rb);

      /* Only clear the buffers present in the provided mask */
      if (((1 << fb->_ColorDrawBufferIndexes[buf]) & mask) == 0)
         continue;

      /* If this is an ES2 context or GL_ARB_ES2_compatibility is supported,
       * the framebuffer can be complete with some attachments missing.  In
       * this case the _ColorDrawBuffers pointer will be NULL.
       */
      if (rb == NULL)
         continue;

      do_single_blorp_clear(brw, fb, rb, buf, partial_clear, encode_srgb);
      irb->need_downsample = true;
   }

   return;
}
void
brw_blorp_clear_depth_stencil(struct brw_context *brw,
                              struct gl_framebuffer *fb,
                              GLbitfield mask, bool partial_clear)
{
   const struct gl_context *ctx = &brw->ctx;
   struct gl_renderbuffer *depth_rb =
      fb->Attachment[BUFFER_DEPTH].Renderbuffer;
   struct gl_renderbuffer *stencil_rb =
      fb->Attachment[BUFFER_STENCIL].Renderbuffer;

   if (!depth_rb || ctx->Depth.Mask == GL_FALSE)
      mask &= ~BUFFER_BIT_DEPTH;

   if (!stencil_rb || (ctx->Stencil.WriteMask[0] & 0xff) == 0)
      mask &= ~BUFFER_BIT_STENCIL;

   if (!(mask & (BUFFER_BITS_DEPTH_STENCIL)))
      return;

   uint32_t x0, x1, y0, y1, rb_name, rb_height;
   if (depth_rb) {
      rb_name = depth_rb->Name;
      rb_height = depth_rb->Height;
      if (stencil_rb) {
         assert(depth_rb->Width == stencil_rb->Width);
         assert(depth_rb->Height == stencil_rb->Height);
      }
   } else {
      assert(stencil_rb);
      rb_name = stencil_rb->Name;
      rb_height = stencil_rb->Height;
   }

   x0 = fb->_Xmin;
   x1 = fb->_Xmax;
   if (rb_name != 0) {
      y0 = fb->_Ymin;
      y1 = fb->_Ymax;
   } else {
      y0 = rb_height - fb->_Ymax;
      y1 = rb_height - fb->_Ymin;
   }

   /* If the clear region is empty, just return. */
   if (x0 == x1 || y0 == y1)
      return;

   uint32_t level, start_layer, num_layers;
   struct isl_surf isl_tmp[4];
   struct blorp_surf depth_surf, stencil_surf;

   struct intel_mipmap_tree *depth_mt = NULL;
   if (mask & BUFFER_BIT_DEPTH) {
      struct intel_renderbuffer *irb = intel_renderbuffer(depth_rb);
      depth_mt = find_miptree(GL_DEPTH_BUFFER_BIT, irb);

      level = irb->mt_level;
      start_layer = irb->mt_layer;
      num_layers = fb->MaxNumLayers ? irb->layer_count : 1;

      intel_miptree_prepare_depth(brw, depth_mt, level,
                                  start_layer, num_layers);

      unsigned depth_level = level;
      blorp_surf_for_miptree(brw, &depth_surf, depth_mt, depth_mt->aux_usage,
                             true, &depth_level, start_layer, num_layers,
                             &isl_tmp[0]);
      assert(depth_level == level);
   }

   uint8_t stencil_mask = 0;
   struct intel_mipmap_tree *stencil_mt = NULL;
   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *irb = intel_renderbuffer(stencil_rb);
      stencil_mt = find_miptree(GL_STENCIL_BUFFER_BIT, irb);

      if (mask & BUFFER_BIT_DEPTH) {
         assert(level == irb->mt_level);
         assert(start_layer == irb->mt_layer);
         assert(num_layers == fb->MaxNumLayers ? irb->layer_count : 1);
      } else {
         level = irb->mt_level;
         start_layer = irb->mt_layer;
         num_layers = fb->MaxNumLayers ? irb->layer_count : 1;
      }

      stencil_mask = ctx->Stencil.WriteMask[0] & 0xff;

      intel_miptree_prepare_access(brw, stencil_mt, level, 1,
                                   start_layer, num_layers,
                                   ISL_AUX_USAGE_NONE, false);

      unsigned stencil_level = level;
      blorp_surf_for_miptree(brw, &stencil_surf, stencil_mt,
                             ISL_AUX_USAGE_NONE, true,
                             &stencil_level, start_layer, num_layers,
                             &isl_tmp[2]);
   }

   assert((mask & BUFFER_BIT_DEPTH) || stencil_mask);

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_clear_depth_stencil(&batch, &depth_surf, &stencil_surf,
                             level, start_layer, num_layers,
                             x0, y0, x1, y1,
                             (mask & BUFFER_BIT_DEPTH), ctx->Depth.Clear,
                             stencil_mask, ctx->Stencil.Clear);
   blorp_batch_finish(&batch);

   if (mask & BUFFER_BIT_DEPTH) {
      intel_miptree_finish_depth(brw, depth_mt, level,
                                 start_layer, num_layers, true);
   }

   if (stencil_mask) {
      intel_miptree_finish_write(brw, stencil_mt, level,
                                 start_layer, num_layers,
                                 ISL_AUX_USAGE_NONE);
   }
}
void
brw_blorp_resolve_color(struct brw_context *brw, struct intel_mipmap_tree *mt,
                        unsigned level, unsigned layer,
                        enum isl_aux_op resolve_op)
{
   DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   const mesa_format format = _mesa_get_srgb_format_linear(mt->format);

   struct isl_surf isl_tmp[1];
   struct blorp_surf surf;
   blorp_surf_for_miptree(brw, &surf, mt, mt->aux_usage, true,
                          &level, layer, 1 /* num_layers */,
                          isl_tmp);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *    different value in {Clear, Render, Resolve} requires end of pipe
    *    synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing.  We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_ccs_resolve(&batch, &surf, level, layer, 1,
                     brw_blorp_to_isl_format(brw, format, true),
                     resolve_op);
   blorp_batch_finish(&batch);

   /* See comment above */
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);
}
void
brw_blorp_mcs_partial_resolve(struct brw_context *brw,
                              struct intel_mipmap_tree *mt,
                              uint32_t start_layer, uint32_t num_layers)
{
   DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
       start_layer, start_layer + num_layers - 1);

   assert(mt->aux_usage == ISL_AUX_USAGE_MCS);

   const mesa_format format = _mesa_get_srgb_format_linear(mt->format);
   enum isl_format isl_format = brw_blorp_to_isl_format(brw, format, true);

   struct isl_surf isl_tmp[1];
   struct blorp_surf surf;
   uint32_t level = 0;
   blorp_surf_for_miptree(brw, &surf, mt, ISL_AUX_USAGE_MCS, true,
                          &level, start_layer, num_layers, isl_tmp);

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_mcs_partial_resolve(&batch, &surf, isl_format,
                             start_layer, num_layers);
   blorp_batch_finish(&batch);
}
/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
void
intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int start_layer,
               unsigned int num_layers, enum isl_aux_op op)
{
   assert(intel_miptree_level_has_hiz(mt, level));
   assert(op != ISL_AUX_OP_NONE);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const char *opname = NULL;

   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      opname = "depth resolve";
      break;
   case ISL_AUX_OP_AMBIGUATE:
      opname = "hiz ambiguate";
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      opname = "depth clear";
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   DBG("%s %s to mt %p level %d layers %d-%d\n",
       __func__, opname, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required for
    * HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    */
   if (devinfo->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
       *
       *   "If other rendering operations have preceded this clear, a
       *    PIPE_CONTROL with write cache flush enabled and Z-inhibit
       *    disabled must be issued before the rectangle primitive used for
       *    the depth buffer clear operation.
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);
   } else if (devinfo->gen >= 7) {
      /*
       * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
       *
       *   If other rendering operations have preceded this clear, a
       *   PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
       *   enabled must be issued before the rectangle primitive used for
       *   the depth buffer clear operation.
       *
       * Same applies for Gen8 and Gen9.
       *
       * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
       * PIPE_CONTROL, Depth Cache Flush Enable:
       *
       *   This bit must not be set when Depth Stall Enable bit is set in
       *   this packet.
       *
       * This is confirmed to hold for real, HSW gets immediate gpu hangs.
       *
       * Therefore issue two pipe control flushes, one for cache flush and
       * another for depth stall.
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
   }

   assert(mt->aux_usage == ISL_AUX_USAGE_HIZ && mt->aux_buf);

   struct isl_surf isl_tmp[2];
   struct blorp_surf surf;
   blorp_surf_for_miptree(brw, &surf, mt, ISL_AUX_USAGE_HIZ, true,
                          &level, start_layer, num_layers, isl_tmp);

   struct blorp_batch batch;
   blorp_batch_init(&brw->blorp, &batch, brw, 0);
   blorp_hiz_op(&batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&batch);

   /* The following stalls and flushes are only documented to be required for
    * HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    */
   if (devinfo->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *   "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be
       *    followed by a PIPE_CONTROL command with DEPTH_STALL bit set
       *    and Then followed by Depth FLUSH'
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);
   } else if (devinfo->gen >= 8) {
      /*
       * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
       *
       *  "Depth buffer clear pass using any of the methods (WM_STATE,
       *   3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
       *   PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
       *   "set" before starting to render.  DepthStall and DepthFlush are
       *   not needed between consecutive depth clear passes nor is it
       *   required if the depth clear pass was done with
       *   'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
       *
       *  TODO: Such as the spec says, this could be conditional.
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_DEPTH_STALL);
   }
}