/*
 * Copyright 2006 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "main/enums.h"
27 #include "main/imports.h"
28 #include "main/macros.h"
29 #include "main/mtypes.h"
30 #include "main/fbobject.h"
31 #include "main/framebuffer.h"
32 #include "main/renderbuffer.h"
33 #include "main/context.h"
34 #include "main/teximage.h"
35 #include "main/image.h"
36 #include "main/condrender.h"
37 #include "util/hash_table.h"
40 #include "swrast/swrast.h"
41 #include "drivers/common/meta.h"
43 #include "intel_batchbuffer.h"
44 #include "intel_buffers.h"
45 #include "intel_blit.h"
46 #include "intel_fbo.h"
47 #include "intel_mipmap_tree.h"
48 #include "intel_image.h"
49 #include "intel_screen.h"
50 #include "intel_tex.h"
51 #include "brw_context.h"
52 #include "brw_defines.h"
54 #define FILE_DEBUG_FLAG DEBUG_FBO
/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}
/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}
/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}
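/* Note: this downsample/upsample pair exists to service CPU maps of
 * window-system MSAA renderbuffers.  intel_map_renderbuffer() below resolves
 * into singlesample_mt before mapping, and intel_unmap_renderbuffer()
 * upsamples back into mt after a writable mapping is released.
 */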
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride,
                       bool flip_y)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The case we are asked to map multisampled RBs is in glReadPixels() (or
    * swrast paths like glCopyTexImage()) from a window-system MSAA buffer,
    * and GL expects an automatic resolve to happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any time),
    * while the miptree itself is our driver-private allocation.  If it's a
    * depth or stencil miptree, we have a private MSAA buffer and no shared
    * singlesample buffer, and since we don't expect anybody to ever actually
    * resolve it, we just make a temporary singlesample buffer now when we
    * have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  1 /*num_samples*/);
         if (!irb->singlesample_mt)
            return;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (flip_y) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (flip_y) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}
/**
 * Round up the requested multisample count to the next supported sample size.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}
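/* Illustration (assuming the mode lists returned by
 * intel_supported_msaa_modes() are sorted in decreasing order and terminated
 * by -1, e.g. { 8, 4, 2, 0, -1 }): a request for 6 samples is quantized to 8,
 * a request for 2 stays at 2, and a request larger than every supported mode
 * yields 0.
 */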
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat, GL_NONE, GL_NONE);
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!devinfo->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}
static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   MAX2(rb->NumSamples, 1));
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);

   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(planar buffers are not "
            "supported as render targets.)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->mesa_format_supports_render[image->format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   irb->mt = intel_miptree_create_for_dri_image(brw, image, GL_TEXTURE_2D,
                                                image->format, false);
   if (irb->mt == NULL)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}
/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}
/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;

   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");

   return false;
}
/**
 * Create an intel_renderbuffer for a __DRIdrawable. This function is
 * unrelated to GL renderbuffers (that is, those created by
 * glGenRenderbuffers).
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_winsys_renderbuffer(struct intel_screen *screen,
                                 mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb)
      return NULL;

   struct gl_renderbuffer *rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->NumSamples = num_samples;

   /* The base format and internal format must be derived from the user-visible
    * format (that is, the gl_config's format), even if we internally
    * choose a different format for the renderbuffer. Otherwise, rendering may
    * use incorrect channel write masks.
    */
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->InternalFormat = rb->_BaseFormat;

   rb->Format = format;
   if (!screen->mesa_format_supports_render[rb->Format]) {
      /* The glRenderbufferStorage paths in core Mesa detect if the driver
       * does not support the user-requested format, and then searches for
       * a fallback format. The DRI code bypasses core Mesa, though. So we do
       * the fallbacks here.
       *
       * We must support MESA_FORMAT_R8G8B8X8 on Android because the Android
       * framework requires HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces.
       */
      rb->Format = _mesa_format_fallback_rgbx_to_rgba(rb->Format);
      assert(screen->mesa_format_supports_render[rb->Format]);
   }

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}
/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_winsys_renderbuffer()) are most similar in their
 * handling to user-created renderbuffers, but they have a resize handler that
 * may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(struct intel_screen *screen,
                                  mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_winsys_renderbuffer(screen, format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}
/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}
static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->surf.dim == ISL_SURF_DIM_3D ?
                            minify(mt->surf.logical_level0_px.depth, level) :
                            mt->surf.logical_level0_px.array_len;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}
#define fbo_incomplete(fb, ...) do {                                       \
      static GLuint msg_id = 0;                                            \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
         _mesa_gl_debug(ctx, &msg_id,                                      \
                        MESA_DEBUG_SOURCE_API,                             \
                        MESA_DEBUG_TYPE_OTHER,                             \
                        MESA_DEBUG_SEVERITY_MEDIUM,                        \
                        __VA_ARGS__);                                      \
      }                                                                    \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                            \
   } while (0)
/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (devinfo->gen >= 6) {
         const unsigned d_width = depth_mt->surf.phys_level0_sa.width;
         const unsigned d_height = depth_mt->surf.phys_level0_sa.height;
         const unsigned d_depth = depth_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     depth_mt->surf.phys_level0_sa.depth :
                                     depth_mt->surf.phys_level0_sa.array_len;

         const unsigned s_width = stencil_mt->surf.phys_level0_sa.width;
         const unsigned s_height = stencil_mt->surf.phys_level0_sa.height;
         const unsigned s_depth = stencil_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     stencil_mt->surf.phys_level0_sa.depth :
                                     stencil_mt->surf.phys_level0_sa.array_len;

         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering. This means that we must restrict
          * the depth & stencil attachments to match in various more restrictive
          * ways. (width, height, depth, LOD and layer)
          */
         if (d_width != s_width ||
             d_height != s_height ||
             d_depth != s_depth ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (devinfo->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
         continue;
      }
   }
}
/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, readFb->FlipY,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, drawFb->FlipY,
                                 dstX1 - dstX0, dstY1 - dstY0,
                                 COLOR_LOGICOP_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure. "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}
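/* intel_blit_framebuffer() below tries each implementation in turn, using the
 * returned mask to track what is still left to blit: the BLT engine first on
 * gen4-5, then blorp, then the meta path, and finally swrast for whatever
 * remains.
 */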
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Page 679 of OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering in
    *     section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (devinfo->gen < 6) {
      /* On gen4-5, try BLT first.
       *
       * Gen4-5 have a single ring for both 3D and BLT operations, so there's
       * no inter-ring synchronization issues like on Gen6+.  It is apparently
       * faster than using the 3D pipeline.  Original Gen4 also has to rebase
       * and copy miptree slices in order to render to unaligned locations.
       */
      mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                                 srcX0, srcY0, srcX1, srcY1,
                                                 dstX0, dstY0, dstX1, dstY1,
                                                 mask);
      if (mask == 0x0)
         return;
   }

   mask = brw_blorp_framebuffer(brw, readFb, drawFb,
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
                                     srcX0, srcY0, srcX1, srcY1,
                                     dstX0, dstY0, dstX1, dstY1,
                                     mask, filter);
   if (mask == 0x0)
      return;

   if (devinfo->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
      assert(!"Invalid blit");
   }

   _swrast_BlitFramebuffer(ctx, readFb, drawFb,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           mask, filter);
}
/**
 * Does the renderbuffer have hiz enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   assert(irb->align_wa_mt == NULL);
   new_mt = intel_miptree_create(brw, GL_TEXTURE_2D,
                                 intel_image->base.Base.TexFormat,
                                 0, 0,
                                 width, height, 1,
                                 irb->mt->surf.samples,
                                 MIPTREE_CREATE_BUSY);

   if (!invalidate)
      intel_miptree_copy_slice(brw, intel_image->mt,
                               intel_image->base.Base.Level, irb->mt_layer,
                               new_mt, 0, 0);

   intel_miptree_reference(&irb->align_wa_mt, new_mt);
   intel_miptree_release(&new_mt);
}
static void
brw_cache_sets_clear(struct brw_context *brw)
{
   struct hash_entry *render_entry;
   hash_table_foreach(brw->render_cache, render_entry)
      _mesa_hash_table_remove(brw->render_cache, render_entry);

   struct set_entry *depth_entry;
   set_foreach(brw->depth_cache, depth_entry)
      _mesa_set_remove(brw->depth_cache, depth_entry);
}
/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
flush_depth_and_render_caches(struct brw_context *brw, struct brw_bo *bo)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_cache_sets_clear(brw);
}
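/* The helpers below implement that bookkeeping: callers flush before a
 * conflicting use (brw_cache_flush_for_read(), brw_cache_flush_for_render(),
 * brw_cache_flush_for_depth()) and record the new use afterwards
 * (brw_render_cache_add_bo(), brw_depth_cache_add_bo()).
 */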
void
brw_cache_flush_for_read(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo) ||
       _mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}
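/* The render cache remembers, per BO, the (format, aux usage) pair it was
 * last rendered with.  Rather than allocating a struct for each entry, the
 * pair is packed into the hash table's data pointer: the ISL format in the
 * upper bits and the aux usage in the low 8 bits (aux usage enum values are
 * small enough to fit).
 */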
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
void
brw_cache_flush_for_render(struct brw_context *brw, struct brw_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      flush_depth_and_render_caches(brw, bo);
}
void
brw_render_cache_add_bo(struct brw_context *brw, struct brw_bo *bo,
                        enum isl_format format,
                        enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert(brw->render_cache, bo,
                           format_aux_tuple(format, aux_usage));
}
void
brw_cache_flush_for_depth(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}
void
brw_depth_cache_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
   _mesa_set_add(brw->depth_cache, bo);
}
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_hash_table_create(brw, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
   brw->depth_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
}