1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "main/enums.h"
30 #include "main/imports.h"
31 #include "main/macros.h"
32 #include "main/mtypes.h"
33 #include "main/fbobject.h"
34 #include "main/framebuffer.h"
35 #include "main/renderbuffer.h"
36 #include "main/context.h"
37 #include "main/teximage.h"
38 #include "main/image.h"
40 #include "swrast/swrast.h"
41 #include "drivers/common/meta.h"
43 #include "intel_batchbuffer.h"
44 #include "intel_buffers.h"
45 #include "intel_blit.h"
46 #include "intel_fbo.h"
47 #include "intel_mipmap_tree.h"
48 #include "intel_regions.h"
49 #include "intel_tex.h"
50 #include "brw_context.h"
52 #define FILE_DEBUG_FLAG DEBUG_FBO
55 * Create a new framebuffer object.
57 static struct gl_framebuffer
*
58 intel_new_framebuffer(struct gl_context
* ctx
, GLuint name
)
60 /* Only drawable state in intel_framebuffer at this time, just use Mesa's
63 return _mesa_new_framebuffer(ctx
, name
);
67 /** Called by gl_renderbuffer::Delete() */
69 intel_delete_renderbuffer(struct gl_context
*ctx
, struct gl_renderbuffer
*rb
)
71 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
75 intel_miptree_release(&irb
->mt
);
77 _mesa_delete_renderbuffer(ctx
, rb
);
81 * \see dd_function_table::MapRenderbuffer
84 intel_map_renderbuffer(struct gl_context
*ctx
,
85 struct gl_renderbuffer
*rb
,
86 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
91 struct brw_context
*brw
= brw_context(ctx
);
92 struct swrast_renderbuffer
*srb
= (struct swrast_renderbuffer
*)rb
;
93 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
98 /* this is a malloc'd renderbuffer (accum buffer), not an irb */
99 GLint bpp
= _mesa_get_format_bytes(rb
->Format
);
100 GLint rowStride
= srb
->RowStride
;
101 *out_map
= (GLubyte
*) srb
->Buffer
+ y
* rowStride
+ x
* bpp
;
102 *out_stride
= rowStride
;
106 intel_prepare_render(brw
);
108 /* For a window-system renderbuffer, we need to flip the mapping we receive
109 * upside-down. So we need to ask for a rectangle on flipped vertically, and
110 * we then return a pointer to the bottom of it with a negative stride.
113 y
= rb
->Height
- y
- h
;
116 intel_miptree_map(brw
, irb
->mt
, irb
->mt_level
, irb
->mt_layer
,
117 x
, y
, w
, h
, mode
, &map
, &stride
);
120 map
+= (h
- 1) * stride
;
124 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
125 __FUNCTION__
, rb
->Name
, _mesa_get_format_name(rb
->Format
),
126 x
, y
, w
, h
, map
, stride
);
129 *out_stride
= stride
;
133 * \see dd_function_table::UnmapRenderbuffer
136 intel_unmap_renderbuffer(struct gl_context
*ctx
,
137 struct gl_renderbuffer
*rb
)
139 struct brw_context
*brw
= brw_context(ctx
);
140 struct swrast_renderbuffer
*srb
= (struct swrast_renderbuffer
*)rb
;
141 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
143 DBG("%s: rb %d (%s)\n", __FUNCTION__
,
144 rb
->Name
, _mesa_get_format_name(rb
->Format
));
147 /* this is a malloc'd renderbuffer (accum buffer) */
152 intel_miptree_unmap(brw
, irb
->mt
, irb
->mt_level
, irb
->mt_layer
);
157 * Round up the requested multisample count to the next supported sample size.
160 intel_quantize_num_samples(struct intel_screen
*intel
, unsigned num_samples
)
162 switch (intel
->gen
) {
164 /* Gen6 supports only 4x multisampling. */
170 /* Gen7 supports 4x and 8x multisampling. */
173 else if (num_samples
> 0)
179 /* MSAA unsupported. */
186 * Called via glRenderbufferStorageEXT() to set the format and allocate
187 * storage for a user-created renderbuffer.
190 intel_alloc_renderbuffer_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
191 GLenum internalFormat
,
192 GLuint width
, GLuint height
)
194 struct brw_context
*brw
= brw_context(ctx
);
195 struct intel_context
*intel
= intel_context(ctx
);
196 struct intel_screen
*screen
= brw
->intelScreen
;
197 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
198 rb
->NumSamples
= intel_quantize_num_samples(screen
, rb
->NumSamples
);
200 switch (internalFormat
) {
202 /* Use the same format-choice logic as for textures.
203 * Renderbuffers aren't any different from textures for us,
204 * except they're less useful because you can't texture with
207 rb
->Format
= ctx
->Driver
.ChooseTextureFormat(ctx
, GL_TEXTURE_2D
,
211 case GL_STENCIL_INDEX
:
212 case GL_STENCIL_INDEX1_EXT
:
213 case GL_STENCIL_INDEX4_EXT
:
214 case GL_STENCIL_INDEX8_EXT
:
215 case GL_STENCIL_INDEX16_EXT
:
216 /* These aren't actual texture formats, so force them here. */
217 if (intel
->has_separate_stencil
) {
218 rb
->Format
= MESA_FORMAT_S8
;
220 assert(!intel
->must_use_separate_stencil
);
221 rb
->Format
= MESA_FORMAT_S8_Z24
;
228 rb
->_BaseFormat
= _mesa_base_fbo_format(ctx
, internalFormat
);
230 intel_miptree_release(&irb
->mt
);
232 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__
,
233 _mesa_lookup_enum_by_nr(internalFormat
),
234 _mesa_get_format_name(rb
->Format
), width
, height
);
236 if (width
== 0 || height
== 0)
239 irb
->mt
= intel_miptree_create_for_renderbuffer(brw
, rb
->Format
,
250 intel_image_target_renderbuffer_storage(struct gl_context
*ctx
,
251 struct gl_renderbuffer
*rb
,
254 struct brw_context
*brw
= brw_context(ctx
);
255 struct intel_renderbuffer
*irb
;
259 screen
= brw
->intelScreen
->driScrnPriv
;
260 image
= screen
->dri2
.image
->lookupEGLImage(screen
, image_handle
,
261 screen
->loaderPrivate
);
265 /* __DRIimage is opaque to the core so it has to be checked here */
266 switch (image
->format
) {
267 case MESA_FORMAT_RGBA8888_REV
:
268 _mesa_error(ctx
, GL_INVALID_OPERATION
,
269 "glEGLImageTargetRenderbufferStorage(unsupported image format");
276 irb
= intel_renderbuffer(rb
);
277 intel_miptree_release(&irb
->mt
);
278 irb
->mt
= intel_miptree_create_for_bo(brw
,
282 image
->region
->width
,
283 image
->region
->height
,
284 image
->region
->pitch
,
285 image
->region
->tiling
);
289 rb
->InternalFormat
= image
->internal_format
;
290 rb
->Width
= image
->region
->width
;
291 rb
->Height
= image
->region
->height
;
292 rb
->Format
= image
->format
;
293 rb
->_BaseFormat
= _mesa_base_fbo_format(ctx
, image
->internal_format
);
294 rb
->NeedsFinishRenderTexture
= true;
298 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
299 * window system framebuffer is resized.
301 * Any actual buffer reallocations for hardware renderbuffers (which would
302 * have triggered _mesa_resize_framebuffer()) were done by
303 * intel_process_dri2_buffer().
306 intel_alloc_window_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
307 GLenum internalFormat
, GLuint width
, GLuint height
)
309 ASSERT(rb
->Name
== 0);
312 rb
->InternalFormat
= internalFormat
;
317 /** Dummy function for gl_renderbuffer::AllocStorage() */
319 intel_nop_alloc_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
320 GLenum internalFormat
, GLuint width
, GLuint height
)
322 _mesa_problem(ctx
, "intel_op_alloc_storage should never be called.");
327 * Create a new intel_renderbuffer which corresponds to an on-screen window,
328 * not a user-created renderbuffer.
330 * \param num_samples must be quantized.
332 struct intel_renderbuffer
*
333 intel_create_renderbuffer(gl_format format
, unsigned num_samples
)
335 struct intel_renderbuffer
*irb
;
336 struct gl_renderbuffer
*rb
;
338 GET_CURRENT_CONTEXT(ctx
);
340 irb
= CALLOC_STRUCT(intel_renderbuffer
);
342 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "creating renderbuffer");
346 rb
= &irb
->Base
.Base
;
348 _mesa_init_renderbuffer(rb
, 0);
349 rb
->ClassID
= INTEL_RB_CLASS
;
350 rb
->_BaseFormat
= _mesa_get_format_base_format(format
);
352 rb
->InternalFormat
= rb
->_BaseFormat
;
353 rb
->NumSamples
= num_samples
;
355 /* intel-specific methods */
356 rb
->Delete
= intel_delete_renderbuffer
;
357 rb
->AllocStorage
= intel_alloc_window_storage
;
363 * Private window-system buffers (as opposed to ones shared with the display
364 * server created with intel_create_renderbuffer()) are most similar in their
365 * handling to user-created renderbuffers, but they have a resize handler that
366 * may be called at intel_update_renderbuffers() time.
368 * \param num_samples must be quantized.
370 struct intel_renderbuffer
*
371 intel_create_private_renderbuffer(gl_format format
, unsigned num_samples
)
373 struct intel_renderbuffer
*irb
;
375 irb
= intel_create_renderbuffer(format
, num_samples
);
376 irb
->Base
.Base
.AllocStorage
= intel_alloc_renderbuffer_storage
;
382 * Create a new renderbuffer object.
383 * Typically called via glBindRenderbufferEXT().
385 static struct gl_renderbuffer
*
386 intel_new_renderbuffer(struct gl_context
* ctx
, GLuint name
)
388 /*struct intel_context *intel = intel_context(ctx); */
389 struct intel_renderbuffer
*irb
;
390 struct gl_renderbuffer
*rb
;
392 irb
= CALLOC_STRUCT(intel_renderbuffer
);
394 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "creating renderbuffer");
398 rb
= &irb
->Base
.Base
;
400 _mesa_init_renderbuffer(rb
, name
);
401 rb
->ClassID
= INTEL_RB_CLASS
;
403 /* intel-specific methods */
404 rb
->Delete
= intel_delete_renderbuffer
;
405 rb
->AllocStorage
= intel_alloc_renderbuffer_storage
;
406 /* span routines set in alloc_storage function */
412 intel_renderbuffer_update_wrapper(struct brw_context
*brw
,
413 struct intel_renderbuffer
*irb
,
414 struct gl_texture_image
*image
,
417 struct gl_renderbuffer
*rb
= &irb
->Base
.Base
;
418 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
419 struct intel_mipmap_tree
*mt
= intel_image
->mt
;
420 int level
= image
->Level
;
422 rb
->Depth
= image
->Depth
;
424 rb
->AllocStorage
= intel_nop_alloc_storage
;
426 intel_miptree_check_level_layer(mt
, level
, layer
);
427 irb
->mt_level
= level
;
429 switch (mt
->msaa_layout
) {
430 case INTEL_MSAA_LAYOUT_UMS
:
431 case INTEL_MSAA_LAYOUT_CMS
:
432 irb
->mt_layer
= layer
* mt
->num_samples
;
436 irb
->mt_layer
= layer
;
439 intel_miptree_reference(&irb
->mt
, mt
);
441 intel_renderbuffer_set_draw_offset(irb
);
443 if (mt
->hiz_mt
== NULL
&& brw_is_hiz_depth_format(brw
, rb
->Format
)) {
444 intel_miptree_alloc_hiz(brw
, mt
);
453 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer
*irb
)
455 unsigned int dst_x
, dst_y
;
457 /* compute offset of the particular 2D image within the texture region */
458 intel_miptree_get_image_offset(irb
->mt
,
468 * Called by glFramebufferTexture[123]DEXT() (and other places) to
469 * prepare for rendering into texture memory. This might be called
470 * many times to choose different texture levels, cube faces, etc
471 * before intel_finish_render_texture() is ever called.
474 intel_render_texture(struct gl_context
* ctx
,
475 struct gl_framebuffer
*fb
,
476 struct gl_renderbuffer_attachment
*att
)
478 struct brw_context
*brw
= brw_context(ctx
);
479 struct gl_renderbuffer
*rb
= att
->Renderbuffer
;
480 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
481 struct gl_texture_image
*image
= rb
->TexImage
;
482 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
483 struct intel_mipmap_tree
*mt
= intel_image
->mt
;
488 if (att
->CubeMapFace
> 0) {
489 assert(att
->Zoffset
== 0);
490 layer
= att
->CubeMapFace
;
492 layer
= att
->Zoffset
;
495 if (!intel_image
->mt
) {
496 /* Fallback on drawing to a texture that doesn't have a miptree
497 * (has a border, width/height 0, etc.)
499 _swrast_render_texture(ctx
, fb
, att
);
503 intel_miptree_check_level_layer(mt
, att
->TextureLevel
, layer
);
505 if (!intel_renderbuffer_update_wrapper(brw
, irb
, image
, layer
)) {
506 _swrast_render_texture(ctx
, fb
, att
);
510 DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
511 _mesa_get_format_name(image
->TexFormat
),
512 att
->Texture
->Name
, image
->Width
, image
->Height
, image
->Depth
,
518 * Called by Mesa when rendering to a texture is done.
521 intel_finish_render_texture(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
)
523 struct brw_context
*brw
= brw_context(ctx
);
525 DBG("Finish render %s texture\n", _mesa_get_format_name(rb
->Format
));
527 /* Since we've (probably) rendered to the texture and will (likely) use
528 * it in the texture domain later on in this batchbuffer, flush the
529 * batch. Once again, we wish for a domain tracker in libdrm to cover
530 * usage inside of a batchbuffer like GEM does in the kernel.
532 intel_batchbuffer_emit_mi_flush(brw
);
/* Report a framebuffer-incompleteness reason (to KHR_debug listeners when a
 * debug context is active, and to the driver debug log) and mark the FBO
 * unsupported.  Requires a `ctx` variable in the enclosing scope.
 * NOTE(review): the __VA_ARGS__/DBG lines were reconstructed from a damaged
 * source listing — verify against the project history.
 */
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)
548 * Do additional "completeness" testing of a framebuffer object.
551 intel_validate_framebuffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
553 struct brw_context
*brw
= brw_context(ctx
);
554 struct intel_context
*intel
= intel_context(ctx
);
555 struct intel_renderbuffer
*depthRb
=
556 intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
557 struct intel_renderbuffer
*stencilRb
=
558 intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
559 struct intel_mipmap_tree
*depth_mt
= NULL
, *stencil_mt
= NULL
;
562 DBG("%s() on fb %p (%s)\n", __FUNCTION__
,
563 fb
, (fb
== ctx
->DrawBuffer
? "drawbuffer" :
564 (fb
== ctx
->ReadBuffer
? "readbuffer" : "other buffer")));
567 depth_mt
= depthRb
->mt
;
569 stencil_mt
= stencilRb
->mt
;
570 if (stencil_mt
->stencil_mt
)
571 stencil_mt
= stencil_mt
->stencil_mt
;
574 if (depth_mt
&& stencil_mt
) {
575 if (depth_mt
== stencil_mt
) {
576 /* For true packed depth/stencil (not faked on prefers-separate-stencil
577 * hardware) we need to be sure they're the same level/layer, since
578 * we'll be emitting a single packet describing the packed setup.
580 if (depthRb
->mt_level
!= stencilRb
->mt_level
||
581 depthRb
->mt_layer
!= stencilRb
->mt_layer
) {
583 "FBO incomplete: depth image level/layer %d/%d != "
584 "stencil image %d/%d\n",
588 stencilRb
->mt_layer
);
591 if (!intel
->has_separate_stencil
) {
592 fbo_incomplete(fb
, "FBO incomplete: separate stencil "
595 if (stencil_mt
->format
!= MESA_FORMAT_S8
) {
596 fbo_incomplete(fb
, "FBO incomplete: separate stencil is %s "
598 _mesa_get_format_name(stencil_mt
->format
));
600 if (intel
->gen
< 7 && !intel_renderbuffer_has_hiz(depthRb
)) {
601 /* Before Gen7, separate depth and stencil buffers can be used
602 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
603 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
604 * [DevSNB]: This field must be set to the same value (enabled
605 * or disabled) as Hierarchical Depth Buffer Enable.
607 fbo_incomplete(fb
, "FBO incomplete: separate stencil "
613 for (i
= 0; i
< Elements(fb
->Attachment
); i
++) {
614 struct gl_renderbuffer
*rb
;
615 struct intel_renderbuffer
*irb
;
617 if (fb
->Attachment
[i
].Type
== GL_NONE
)
620 /* A supported attachment will have a Renderbuffer set either
621 * from being a Renderbuffer or being a texture that got the
622 * intel_wrap_texture() treatment.
624 rb
= fb
->Attachment
[i
].Renderbuffer
;
626 fbo_incomplete(fb
, "FBO incomplete: attachment without "
631 if (fb
->Attachment
[i
].Type
== GL_TEXTURE
) {
632 if (rb
->TexImage
->Border
) {
633 fbo_incomplete(fb
, "FBO incomplete: texture with border\n");
638 irb
= intel_renderbuffer(rb
);
640 fbo_incomplete(fb
, "FBO incomplete: software rendering "
645 if (!brw_render_target_supported(brw
, rb
)) {
646 fbo_incomplete(fb
, "FBO incomplete: Unsupported HW "
647 "texture/renderbuffer format attached: %s\n",
648 _mesa_get_format_name(intel_rb_format(irb
)));
654 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
655 * We can do this when the dst renderbuffer is actually a texture and
656 * there is no scaling, mirroring or scissoring.
658 * \return new buffer mask indicating the buffers left to blit using the
662 intel_blit_framebuffer_with_blitter(struct gl_context
*ctx
,
663 GLint srcX0
, GLint srcY0
,
664 GLint srcX1
, GLint srcY1
,
665 GLint dstX0
, GLint dstY0
,
666 GLint dstX1
, GLint dstY1
,
667 GLbitfield mask
, GLenum filter
)
669 struct brw_context
*brw
= brw_context(ctx
);
671 if (mask
& GL_COLOR_BUFFER_BIT
) {
673 const struct gl_framebuffer
*drawFb
= ctx
->DrawBuffer
;
674 const struct gl_framebuffer
*readFb
= ctx
->ReadBuffer
;
675 struct gl_renderbuffer
*src_rb
= readFb
->_ColorReadBuffer
;
676 struct intel_renderbuffer
*src_irb
= intel_renderbuffer(src_rb
);
679 perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
680 "Falling back to software rendering.\n");
684 /* If the source and destination are the same size with no mirroring,
685 * the rectangles are within the size of the texture and there is no
686 * scissor, then we can probably use the blit engine.
688 if (!(srcX0
- srcX1
== dstX0
- dstX1
&&
689 srcY0
- srcY1
== dstY0
- dstY1
&&
692 srcX0
>= 0 && srcX1
<= readFb
->Width
&&
693 srcY0
>= 0 && srcY1
<= readFb
->Height
&&
694 dstX0
>= 0 && dstX1
<= drawFb
->Width
&&
695 dstY0
>= 0 && dstY1
<= drawFb
->Height
&&
696 !ctx
->Scissor
.Enabled
)) {
697 perf_debug("glBlitFramebuffer(): non-1:1 blit. "
698 "Falling back to software rendering.\n");
702 /* Blit to all active draw buffers. We don't do any pre-checking,
703 * because we assume that copying to MRTs is rare, and failure midway
704 * through copying is even more rare. Even if it was to occur, it's
705 * safe to let meta start the copy over from scratch, because
706 * glBlitFramebuffer completely overwrites the destination pixels, and
707 * results are undefined if any destination pixels have a dependency on
710 for (i
= 0; i
< ctx
->DrawBuffer
->_NumColorDrawBuffers
; i
++) {
711 struct gl_renderbuffer
*dst_rb
= ctx
->DrawBuffer
->_ColorDrawBuffers
[i
];
712 struct intel_renderbuffer
*dst_irb
= intel_renderbuffer(dst_rb
);
715 perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
716 "Falling back to software rendering.\n");
720 gl_format src_format
= _mesa_get_srgb_format_linear(src_rb
->Format
);
721 gl_format dst_format
= _mesa_get_srgb_format_linear(dst_rb
->Format
);
722 if (src_format
!= dst_format
) {
723 perf_debug("glBlitFramebuffer(): unsupported blit from %s to %s. "
724 "Falling back to software rendering.\n",
725 _mesa_get_format_name(src_format
),
726 _mesa_get_format_name(dst_format
));
730 if (!intel_miptree_blit(brw
,
732 src_irb
->mt_level
, src_irb
->mt_layer
,
733 srcX0
, srcY0
, src_rb
->Name
== 0,
735 dst_irb
->mt_level
, dst_irb
->mt_layer
,
736 dstX0
, dstY0
, dst_rb
->Name
== 0,
737 dstX1
- dstX0
, dstY1
- dstY0
, GL_COPY
)) {
738 perf_debug("glBlitFramebuffer(): unknown blit failure. "
739 "Falling back to software rendering.\n");
744 mask
&= ~GL_COLOR_BUFFER_BIT
;
751 intel_blit_framebuffer(struct gl_context
*ctx
,
752 GLint srcX0
, GLint srcY0
, GLint srcX1
, GLint srcY1
,
753 GLint dstX0
, GLint dstY0
, GLint dstX1
, GLint dstY1
,
754 GLbitfield mask
, GLenum filter
)
756 mask
= brw_blorp_framebuffer(brw_context(ctx
),
757 srcX0
, srcY0
, srcX1
, srcY1
,
758 dstX0
, dstY0
, dstX1
, dstY1
,
763 /* Try using the BLT engine. */
764 mask
= intel_blit_framebuffer_with_blitter(ctx
,
765 srcX0
, srcY0
, srcX1
, srcY1
,
766 dstX0
, dstY0
, dstX1
, dstY1
,
772 _mesa_meta_BlitFramebuffer(ctx
,
773 srcX0
, srcY0
, srcX1
, srcY1
,
774 dstX0
, dstY0
, dstX1
, dstY1
,
779 * This is a no-op except on multisample buffers shared with DRI2.
782 intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer
*irb
)
784 if (irb
->mt
&& irb
->mt
->singlesample_mt
)
785 irb
->mt
->need_downsample
= true;
789 * Does the renderbuffer have hiz enabled?
792 intel_renderbuffer_has_hiz(struct intel_renderbuffer
*irb
)
794 return intel_miptree_slice_has_hiz(irb
->mt
, irb
->mt_level
, irb
->mt_layer
);
798 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer
*irb
)
801 intel_miptree_slice_set_needs_hiz_resolve(irb
->mt
,
808 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer
*irb
)
811 intel_miptree_slice_set_needs_depth_resolve(irb
->mt
,
818 intel_renderbuffer_resolve_hiz(struct brw_context
*brw
,
819 struct intel_renderbuffer
*irb
)
822 return intel_miptree_slice_resolve_hiz(brw
,
831 intel_renderbuffer_resolve_depth(struct brw_context
*brw
,
832 struct intel_renderbuffer
*irb
)
835 return intel_miptree_slice_resolve_depth(brw
,
844 intel_renderbuffer_move_to_temp(struct brw_context
*brw
,
845 struct intel_renderbuffer
*irb
,
848 struct gl_renderbuffer
*rb
=&irb
->Base
.Base
;
849 struct intel_texture_image
*intel_image
= intel_texture_image(rb
->TexImage
);
850 struct intel_mipmap_tree
*new_mt
;
851 int width
, height
, depth
;
853 intel_miptree_get_dimensions_for_image(rb
->TexImage
, &width
, &height
, &depth
);
855 new_mt
= intel_miptree_create(brw
, rb
->TexImage
->TexObject
->Target
,
856 intel_image
->base
.Base
.TexFormat
,
857 intel_image
->base
.Base
.Level
,
858 intel_image
->base
.Base
.Level
,
859 width
, height
, depth
,
861 irb
->mt
->num_samples
,
862 INTEL_MIPTREE_TILING_ANY
);
864 if (brw_is_hiz_depth_format(brw
, new_mt
->format
)) {
865 intel_miptree_alloc_hiz(brw
, new_mt
);
868 intel_miptree_copy_teximage(brw
, intel_image
, new_mt
, invalidate
);
870 intel_miptree_reference(&irb
->mt
, intel_image
->mt
);
871 intel_renderbuffer_set_draw_offset(irb
);
872 intel_miptree_release(&new_mt
);
876 * Do one-time context initializations related to GL_EXT_framebuffer_object.
877 * Hook in device driver functions.
880 intel_fbo_init(struct brw_context
*brw
)
882 struct dd_function_table
*dd
= &brw
->intel
.ctx
.Driver
;
883 dd
->NewFramebuffer
= intel_new_framebuffer
;
884 dd
->NewRenderbuffer
= intel_new_renderbuffer
;
885 dd
->MapRenderbuffer
= intel_map_renderbuffer
;
886 dd
->UnmapRenderbuffer
= intel_unmap_renderbuffer
;
887 dd
->RenderTexture
= intel_render_texture
;
888 dd
->FinishRenderTexture
= intel_finish_render_texture
;
889 dd
->ValidateFramebuffer
= intel_validate_framebuffer
;
890 dd
->BlitFramebuffer
= intel_blit_framebuffer
;
891 dd
->EGLImageTargetRenderbufferStorage
=
892 intel_image_target_renderbuffer_storage
;