/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/hash_table.h"
#include "main/set.h"
#include "main/condrender.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"

#define FILE_DEBUG_FLAG DEBUG_FBO

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class.
    */
   return _mesa_new_framebuffer(ctx, name);
}

/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}

/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The case where we are asked to map multisampled RBs is glReadPixels()
    * (or swrast paths like glCopyTexImage()) reading from a window-system
    * MSAA buffer, where GL expects an automatic resolve to happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any time),
    * while the miptree itself is our driver-private allocation.  If it's a
    * depth or stencil miptree, we have a private MSAA buffer and no shared
    * singlesample buffer, and since we don't expect anybody to ever actually
    * resolve it, we just make a temporary singlesample buffer now when we
    * have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  0 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for a rectangle that is flipped vertically, and
    * then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);
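
   /* Hand back a pointer to the last row with a negated stride, so the
    * caller walks the window-system buffer from top to bottom.
    */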
   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer): nothing to unmap */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);
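
   /* If the mapping was writable, the single-sampled copy now holds the
    * caller's modifications; propagate them back into the real MSAA miptree.
    */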
   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}

/**
 * Round up the requested multisample count to the next supported sample size.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

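   /* msaa_modes[] is sorted in descending order and terminated by -1, so the
    * loop ends holding the smallest supported mode that still satisfies the
    * request (or 0 if num_samples exceeds the largest supported mode).
    */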
   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}

static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat,
                                             GL_NONE, GL_NONE);

   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!brw->must_use_separate_stencil);
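         /* Without separate stencil, stencil data lives alongside depth in
          * the packed depth/stencil buffer.
          */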
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}

static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->intelScreen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   rb->NumSamples);
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}

/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}

static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = brw->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are not "
                  "supported as render targets)");
      return;
   }

   /* Buffers originating from outside are read-only. */
   if (image->dma_buf_imported) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(dma buffers are "
                  "read-only)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_R8G8B8A8_UNORM:
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image "
                  "format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_bo(brw,
                                         image->region->bo,
                                         image->format,
                                         image->offset,
                                         image->region->width,
                                         image->region->height,
                                         image->region->pitch,
                                         image->region->tiling);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->NumSamples = num_samples;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_renderbuffer()) are most similar in their
 * handling to user-created renderbuffers, but they have a resize handler that
 * may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}

static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;

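   /* In the UMS and CMS multisample layouts, each sample of a logical layer
    * occupies its own physical array slice, so the layer index must be
    * scaled by the sample count.
    */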
   int layer_multiplier;
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      layer_multiplier = mt->num_samples;
      break;

   default:
      layer_multiplier = 1;
   }

   irb->mt_layer = layer_multiplier * layer;

   if (layered) {
      irb->layer_count = image->TexObject->NumLayers ?: mt->level[level].depth / layer_multiplier;
   } else {
      irb->layer_count = 1;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
      intel_miptree_alloc_hiz(brw, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

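   /* Cube map faces are stored as consecutive layers of the miptree, so a
    * face attachment is treated like a layer attachment below.
    */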
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}

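/* Report the reason a framebuffer is incomplete, both through KHR_debug and
 * the DEBUG_FBO channel, and mark the framebuffer as unsupported.
 */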
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   DBG("%s() on fb %p (%s)\n", __FUNCTION__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

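   /* Cross-check the depth and stencil attachments for combinations that the
    * hardware depth/stencil setup cannot express.
    */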
   if (depth_mt && stencil_mt) {
      if (brw->gen >= 7) {
         /* For gen >= 7, we are using the lod/minimum-array-element fields
          * and supporting layered rendering.  This means that we must
          * restrict the depth & stencil attachments to match in various more
          * restrictive ways (width, height, depth, LOD and layer).
          */
         if (depth_mt->physical_width0 != stencil_mt->physical_width0 ||
             depth_mt->physical_height0 != stencil_mt->physical_height0 ||
             depth_mt->physical_depth0 != stencil_mt->physical_depth0 ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using the hardware BLT engine.
 * We can do this when the blit is 1:1 with no scaling, mirroring or
 * scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      GLint i;
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer.  "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure.  "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Page 679 of OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering in
    *     section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

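   /* Resolve as much of the blit as possible with BLORP first; any buffers
    * it cannot handle fall through to the BLT engine, and whatever is still
    * left goes to the meta path.
    */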
   mask = brw_blorp_framebuffer(brw_context(ctx),
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   /* Try using the BLT engine. */
   mask = intel_blit_framebuffer_with_blitter(ctx,
                                              srcX0, srcY0, srcX1, srcY1,
                                              dstX0, dstY0, dstX1, dstY1,
                                              mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * Does the renderbuffer have hiz enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
}

bool
intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(brw,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

void
intel_renderbuffer_att_set_needs_depth_resolve(struct gl_renderbuffer_attachment *att)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   if (irb->mt) {
      if (att->Layered) {
         intel_miptree_set_all_slices_need_depth_resolve(irb->mt,
                                                         irb->mt_level);
      } else {
         intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                     irb->mt_level,
                                                     irb->mt_layer);
      }
   }
}

bool
intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(brw,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

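/**
 * Replace the renderbuffer's miptree with a freshly allocated one, copying
 * the existing texture image's contents over unless \p invalidate says they
 * can be discarded.
 */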
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);

   new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 true,
                                 irb->mt->num_samples,
                                 INTEL_MIPTREE_TILING_ANY);

   if (brw_is_hiz_depth_format(brw, new_mt->format)) {
      intel_miptree_alloc_hiz(brw, new_mt);
   }

   intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);

   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   intel_miptree_release(&new_mt);
}

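/* The render cache set tracks BOs rendered to in the current batchbuffer; it
 * is cleared when the batch is flushed, since from then on the kernel
 * guarantees coherency before the BOs are reused.
 */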
void
brw_render_cache_set_clear(struct brw_context *brw)
{
   struct set_entry *entry;

   set_foreach(brw->render_cache, entry) {
      _mesa_set_remove(brw->render_cache, entry);
   }
}

void
brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
{
   _mesa_set_add(brw->render_cache, _mesa_hash_pointer(bo), bo);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
void
brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo)
{
   if (!_mesa_set_search(brw->render_cache, _mesa_hash_pointer(bo), bo))
      return;

   intel_batchbuffer_emit_mi_flush(brw);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewFramebuffer = intel_new_framebuffer;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_set_create(brw, _mesa_key_pointer_equal);
}