/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/condrender.h"
#include "util/hash_table.h"
#include "util/set.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_FBO
/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}
/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}
/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The case where we are asked to map multisampled RBs is glReadPixels()
    * (or swrast paths like glCopyTexImage()) from a window-system MSAA
    * buffer, and GL expects an automatic resolve to happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any time),
    * while the miptree itself is our driver-private allocation.  If it's a
    * depth or stencil miptree, we have a private MSAA buffer and no shared
    * singlesample buffer, and since we don't expect anybody to ever actually
    * resolve it, we just make a temporary singlesample buffer now when we
    * have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  1 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for the rectangle flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}
/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer); nothing to unmap */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}
/**
 * Round up the requested multisample count to the next supported sample size.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}
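
/**
 * Select the mesa_format to use for a renderbuffer with the given internal
 * format.  Most internal formats reuse the texture format-selection logic;
 * the stencil-only formats are special-cased below because they are not
 * texturable formats.
 */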
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat,
                                             GL_NONE, GL_NONE);
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!devinfo->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}
static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   MAX2(rb->NumSamples, 1));
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are not "
                  "supported as render targets.)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->mesa_format_supports_render[image->format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver exposes
    * no EGL API to manage them.  That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   irb->mt = intel_miptree_create_for_dri_image(brw, image, GL_TEXTURE_2D,
                                                image->format, false);
   if (irb->mt == NULL)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}
/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}
/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;

   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}
/**
 * Create an intel_renderbuffer for a __DRIdrawable.  This function is
 * unrelated to GL renderbuffers (that is, those created by
 * glGenRenderbuffers).
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_winsys_renderbuffer(struct intel_screen *screen,
                                 mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb)
      return NULL;

   struct gl_renderbuffer *rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->NumSamples = num_samples;

   /* The base format and internal format must be derived from the user-visible
    * format (that is, the gl_config's format), even if we internally choose a
    * different format for the renderbuffer.  Otherwise, rendering may use
    * incorrect channel write masks.
    */
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->InternalFormat = rb->_BaseFormat;

   rb->Format = format;
   if (!screen->mesa_format_supports_render[rb->Format]) {
      /* The glRenderbufferStorage paths in core Mesa detect if the driver
       * does not support the user-requested format, and then search for a
       * fallback format.  The DRI code bypasses core Mesa, though, so we do
       * the fallbacks here.
       *
       * We must support MESA_FORMAT_R8G8B8X8 on Android because the Android
       * framework requires HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces.
       */
      rb->Format = _mesa_format_fallback_rgbx_to_rgba(rb->Format);
      assert(screen->mesa_format_supports_render[rb->Format]);
   }

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}
/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_winsys_renderbuffer()) are most similar
 * in their handling to user-created renderbuffers, but they have a resize
 * handler that may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(struct intel_screen *screen,
                                  mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_winsys_renderbuffer(screen, format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}
/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}
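
/**
 * Make the renderbuffer wrap the given texture image so that the image can
 * be used as a render target: record the miptree level/layer (adjusted for
 * texture view parameters) and the layer count for layered rendering.
 */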
static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->surf.dim == ISL_SURF_DIM_3D ?
                            minify(mt->surf.logical_level0_px.depth, level) :
                            mt->surf.logical_level0_px.array_len;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}
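
/**
 * Recompute the renderbuffer's draw offsets (the x/y position of its 2D
 * image within the miptree) from the current mt_level/mt_layer.
 */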
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}
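
/* Mark the framebuffer as unsupported and report why, both through
 * KHR_debug (in debug contexts) and the FBO debug channel.  Used by
 * intel_validate_framebuffer() below, e.g.:
 *
 *    fbo_incomplete(fb, "FBO incomplete: texture with border\n");
 */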
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_SOURCE_API,                                \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)
/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (devinfo->gen >= 6) {
         const unsigned d_width = depth_mt->surf.phys_level0_sa.width;
         const unsigned d_height = depth_mt->surf.phys_level0_sa.height;
         const unsigned d_depth = depth_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     depth_mt->surf.phys_level0_sa.depth :
                                     depth_mt->surf.phys_level0_sa.array_len;

         const unsigned s_width = stencil_mt->surf.phys_level0_sa.width;
         const unsigned s_height = stencil_mt->surf.phys_level0_sa.height;
         const unsigned s_depth = stencil_mt->surf.dim == ISL_SURF_DIM_3D ?
                                     stencil_mt->surf.phys_level0_sa.depth :
                                     stencil_mt->surf.phys_level0_sa.array_len;

         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering.  This means that we must restrict
          * the depth & stencil attachments to match in various more restrictive
          * ways (width, height, depth, LOD and layer).
          */
         if (d_width != s_width ||
             d_height != s_height ||
             d_depth != s_depth ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb, "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (devinfo->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}
/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D.
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer.  "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0,
                                 COLOR_LOGICOP_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure.  "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}
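
/**
 * \see dd_function_table::BlitFramebuffer
 *
 * Tries progressively more general paths until the whole mask is handled:
 * the BLT engine first on gen4-5, then BLORP, meta, the BLT engine again,
 * and finally swrast.
 */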
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Page 679 of OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering in
    *     section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (devinfo->gen < 6) {
      /* On gen4-5, try BLT first.
       *
       * Gen4-5 have a single ring for both 3D and BLT operations, so there's
       * no inter-ring synchronization issues like on Gen6+.  It is apparently
       * faster than using the 3D pipeline.  Original Gen4 also has to rebase
       * and copy miptree slices in order to render to unaligned locations.
       */
      mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                                 srcX0, srcY0, srcX1, srcY1,
                                                 dstX0, dstY0, dstX1, dstY1,
                                                 mask);
      if (mask == 0x0)
         return;
   }

   mask = brw_blorp_framebuffer(brw, readFb, drawFb,
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
                                     srcX0, srcY0, srcX1, srcY1,
                                     dstX0, dstY0, dstX1, dstY1,
                                     mask, filter);
   if (mask == 0x0)
      return;

   if (devinfo->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
      assert(!"Invalid blit");
      return;
   }

   /* Try using the BLT engine. */
   mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                              srcX0, srcY0, srcX1, srcY1,
                                              dstX0, dstY0, dstX1, dstY1,
                                              mask);
   if (mask == 0x0)
      return;

   _swrast_BlitFramebuffer(ctx, readFb, drawFb,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           mask, filter);
}
/**
 * Does the renderbuffer have hiz enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}
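
/**
 * Copy the renderbuffer's current contents into a freshly allocated
 * miptree, referenced by align_wa_mt, that rendering can target directly.
 * This is the workaround for gen4-era hardware that cannot render to the
 * unaligned offsets of non-zero miptree slices.  If \p invalidate is set,
 * the copy is skipped because the old contents will be overwritten anyway.
 */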
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   assert(irb->align_wa_mt == NULL);
   new_mt = intel_miptree_create(brw, GL_TEXTURE_2D,
                                 intel_image->base.Base.TexFormat,
                                 0, 0,
                                 width, height, 1,
                                 irb->mt->surf.samples,
                                 MIPTREE_CREATE_BUSY);

   if (!invalidate)
      intel_miptree_copy_slice(brw, intel_image->mt,
                               intel_image->base.Base.Level, irb->mt_layer,
                               new_mt, 0, 0);

   intel_miptree_reference(&irb->align_wa_mt, new_mt);
   intel_miptree_release(&new_mt);

   irb->draw_x = 0;
   irb->draw_y = 0;
}
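
/* The render cache and depth cache sets track which BOs have been written
 * as render targets or depth buffers within the current batchbuffer, so
 * that the flush helpers below can emit the required cache flushes before
 * a conflicting access.  Both sets are emptied whenever such a flush is
 * emitted.
 */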
static void
brw_cache_sets_clear(struct brw_context *brw)
{
   struct hash_entry *render_entry;
   hash_table_foreach(brw->render_cache, render_entry)
      _mesa_hash_table_remove(brw->render_cache, render_entry);

   struct set_entry *depth_entry;
   set_foreach(brw->depth_cache, depth_entry)
      _mesa_set_remove(brw->depth_cache, depth_entry);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
flush_depth_and_render_caches(struct brw_context *brw, struct brw_bo *bo)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_cache_sets_clear(brw);
}
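
/**
 * Flush before a read (e.g. through the sampler) of a BO that may have
 * been written as a render target or depth buffer earlier in this batch.
 */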
void
brw_cache_flush_for_read(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo) ||
       _mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}
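
/* Pack a format/aux-usage pair into a pointer-sized value so that render
 * cache entries can record how a BO was last rendered.
 */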
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
void
brw_cache_flush_for_render(struct brw_context *brw, struct brw_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      flush_depth_and_render_caches(brw, bo);
}
void
brw_render_cache_add_bo(struct brw_context *brw, struct brw_bo *bo,
                        enum isl_format format,
                        enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert(brw->render_cache, bo,
                           format_aux_tuple(format, aux_usage));
}
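
/**
 * Flush pending render-cache writes for a BO that is about to be used as
 * a depth buffer.
 */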
void
brw_cache_flush_for_depth(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}

void
brw_depth_cache_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
   _mesa_set_add(brw->depth_cache, bo);
}
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_hash_table_create(brw, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
   brw->depth_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
}