/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/condrender.h"
#include "util/hash_table.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_FBO
/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}
/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}
/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}
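/* These two helpers are paired by the renderbuffer map path below:
 * intel_map_renderbuffer() downsamples a multisampled winsys buffer before
 * handing out a single-sampled mapping, and intel_unmap_renderbuffer()
 * upsamples it again afterwards if the mapping was writable
 * (need_map_upsample).
 */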
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride,
                       bool flip_y)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;
   /* driver does not support GL_FRAMEBUFFER_FLIP_Y_MESA */
   assert((rb->Name == 0) == flip_y);

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }
   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The case we are asked to map multisampled RBs is in glReadPixels() (or
    * swrast paths like glCopyTexImage()) from a window-system MSAA buffer,
    * and GL expects an automatic resolve to happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any time),
    * while the miptree itself is our driver-private allocation.  If it's a
    * depth or stencil miptree, we have a private MSAA buffer and no shared
    * singlesample buffer, and since we don't expect anybody to ever actually
    * resolve it, we just make a temporary singlesample buffer now when we
    * have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  1 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }
   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for a rectangle that is flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (flip_y) {
      y = rb->Height - y - h;
   }
   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (flip_y) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}
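/* The flip above works out as follows (values are illustrative only):
 * mapping x=0, y=0, w=4, h=16 of a 64-pixel-tall window-system buffer asks
 * the miptree for the rectangle at y = 64 - 0 - 16 = 48, then returns a
 * pointer to that rectangle's last row with the stride negated, so the
 * caller's row 0 is the bottom row of the mapped region.
 */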
/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing needed */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}
/**
 * Round up the requested multisample count to the next supported sample size.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}
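/* intel_supported_msaa_modes() returns its list in decreasing order, so the
 * loop above walks down until it finds a mode below the request.  For
 * example (hypothetical mode list): with modes {8, 4, 2, 0}, a request for
 * 6 samples quantizes to 8, a request for 3 quantizes to 4, and a request
 * above the maximum (say 16) yields 0.
 */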
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat, GL_NONE, GL_NONE);

   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!devinfo->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}
static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   MAX2(rb->NumSamples, 1));
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat,
                                                   width, height);
}
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are not "
                  "supported as render targets.)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->mesa_format_supports_render[image->format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   irb->mt = intel_miptree_create_for_dri_image(brw, image, GL_TEXTURE_2D,
                                                image->format, false);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}
/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}
/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}
/**
 * Create an intel_renderbuffer for a __DRIdrawable. This function is
 * unrelated to GL renderbuffers (that is, those created by
 * glGenRenderbuffers).
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_winsys_renderbuffer(struct intel_screen *screen,
                                 mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb)
      return NULL;

   struct gl_renderbuffer *rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->NumSamples = num_samples;

   /* The base format and internal format must be derived from the user-visible
    * format (that is, the gl_config's format), even if we internally choose a
    * different format for the renderbuffer. Otherwise, rendering may use
    * incorrect channel write masks.
    */
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->InternalFormat = rb->_BaseFormat;

   rb->Format = format;
   if (!screen->mesa_format_supports_render[rb->Format]) {
      /* The glRenderbufferStorage paths in core Mesa detect if the driver
       * does not support the user-requested format, and then search for a
       * fallback format. The DRI code bypasses core Mesa, though. So we do
       * the fallbacks here.
       *
       * We must support MESA_FORMAT_R8G8B8X8 on Android because the Android
       * framework requires HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces.
       */
      rb->Format = _mesa_format_fallback_rgbx_to_rgba(rb->Format);
      assert(screen->mesa_format_supports_render[rb->Format]);
   }

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}
/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_winsys_renderbuffer()) are most similar
 * in their handling to user-created renderbuffers, but they have a resize
 * handler that may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(struct intel_screen *screen,
                                  mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_winsys_renderbuffer(screen, format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}
/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}
static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->surf.dim == ISL_SURF_DIM_3D ?
                            minify(mt->surf.logical_level0_px.depth, level) :
                            mt->surf.logical_level0_px.array_len;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_SOURCE_API,                                \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)
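/* The macro both reports the failure (as a KHR_debug message when a debug
 * context is active, and via DBG output) and marks the framebuffer
 * unsupported, e.g.:
 *
 *    fbo_incomplete(fb, "FBO incomplete: texture with border\n");
 *
 * It relies on a variable named ctx being in scope at the use site.
 */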
/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }
   if (depth_mt && stencil_mt) {
      if (devinfo->gen >= 6) {
         const unsigned d_width = depth_mt->surf.phys_level0_sa.width;
         const unsigned d_height = depth_mt->surf.phys_level0_sa.height;
         const unsigned d_depth = depth_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     depth_mt->surf.phys_level0_sa.depth :
                                     depth_mt->surf.phys_level0_sa.array_len;

         const unsigned s_width = stencil_mt->surf.phys_level0_sa.width;
         const unsigned s_height = stencil_mt->surf.phys_level0_sa.height;
         const unsigned s_depth = stencil_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     stencil_mt->surf.phys_level0_sa.depth :
                                     stencil_mt->surf.phys_level0_sa.array_len;
         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering. This means that we must restrict
          * the depth & stencil attachments to match in various more
          * restrictive ways (width, height, depth, LOD and layer).
          */
         if (d_width != s_width ||
             d_height != s_height ||
             d_depth != s_depth ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (devinfo->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }
   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}
/**
 * Try to do a glBlitFramebuffer using the hardware blitter.
 * We can do this when there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0,
                                 COLOR_LOGICOP_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure. "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}
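/* The rectangle test above is pure integer arithmetic: requiring
 * srcX0 - srcX1 == dstX0 - dstX1 and srcY0 - srcY1 == dstY0 - dstY1 forces
 * the source and destination to have the same size and orientation, so any
 * scaled or mirrored blit (e.g. srcX0=0, srcX1=4 to dstX0=4, dstX1=0, where
 * the differences are -4 and 4) falls through to the software path.
 */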
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Page 679 of the OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering
    *     in section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (devinfo->gen < 6) {
      /* On gen4-5, try BLT first.
       *
       * Gen4-5 have a single ring for both 3D and BLT operations, so there
       * are no inter-ring synchronization issues like on Gen6+.  It is
       * apparently faster than using the 3D pipeline.  Original Gen4 also
       * has to rebase and copy miptree slices in order to render to
       * unaligned locations.
       */
      mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                                 srcX0, srcY0, srcX1, srcY1,
                                                 dstX0, dstY0, dstX1, dstY1,
                                                 mask);
      if (mask == 0x0)
         return;
   }

   mask = brw_blorp_framebuffer(brw, readFb, drawFb,
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
                                     srcX0, srcY0, srcX1, srcY1,
                                     dstX0, dstY0, dstX1, dstY1,
                                     mask, filter);
   if (mask == 0x0)
      return;

   if (devinfo->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
      assert(!"Invalid blit");
      return;
   }

   _swrast_BlitFramebuffer(ctx, readFb, drawFb,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           mask, filter);
}
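/* The blit path tries progressively more general fallbacks: the BLT engine
 * on gen4-5, then BLORP, then the meta (GL-based) path, and finally swrast;
 * each stage clears the bits it handled from mask, and the next stage only
 * sees what is left.
 */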
/**
 * Does the renderbuffer have hiz enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   assert(irb->align_wa_mt == NULL);
   new_mt = intel_miptree_create(brw, GL_TEXTURE_2D,
                                 intel_image->base.Base.TexFormat,
                                 0, 0,
                                 width, height, 1,
                                 irb->mt->surf.samples,
                                 MIPTREE_CREATE_BUSY);

   if (!invalidate) {
      intel_miptree_copy_slice(brw, intel_image->mt,
                               intel_image->base.Base.Level, irb->mt_layer,
                               new_mt, 0, 0);
   }

   intel_miptree_reference(&irb->align_wa_mt, new_mt);
   intel_miptree_release(&new_mt);
}
void
brw_cache_sets_clear(struct brw_context *brw)
{
   struct hash_entry *render_entry;
   hash_table_foreach(brw->render_cache, render_entry)
      _mesa_hash_table_remove(brw->render_cache, render_entry);

   struct set_entry *depth_entry;
   set_foreach(brw->depth_cache, depth_entry)
      _mesa_set_remove(brw->depth_cache, depth_entry);
}
/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
flush_depth_and_render_caches(struct brw_context *brw, struct brw_bo *bo)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_cache_sets_clear(brw);
}
void
brw_cache_flush_for_read(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo) ||
       _mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
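/* The tuple is just a pointer-sized key for the render-cache hash table:
 * the format lives in the upper bits and the aux usage in the low byte
 * (enum isl_aux_usage values fit comfortably in 8 bits), so two entries for
 * the same BO compare unequal whenever either the format or the aux usage
 * differs.
 */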
void
brw_cache_flush_for_render(struct brw_context *brw, struct brw_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      flush_depth_and_render_caches(brw, bo);
}
void
brw_render_cache_add_bo(struct brw_context *brw, struct brw_bo *bo,
                        enum isl_format format,
                        enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert(brw->render_cache, bo,
                           format_aux_tuple(format, aux_usage));
}
void
brw_cache_flush_for_depth(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}
void
brw_depth_cache_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
   _mesa_set_add(brw->depth_cache, bo);
}
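/* Taken together, these helpers implement a simple protocol for same-batch
 * cache coherency: call brw_render_cache_add_bo() / brw_depth_cache_add_bo()
 * after rendering to a BO, and call the matching brw_cache_flush_for_read(),
 * brw_cache_flush_for_render() or brw_cache_flush_for_depth() before the
 * next use of that BO through a different cache.  Anything still tracked
 * when the batch is submitted is handled by the kernel, and the tracking
 * sets are emptied via brw_cache_sets_clear().
 */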
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_hash_table_create(brw, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
   brw->depth_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
}