1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "main/enums.h"
30 #include "main/imports.h"
31 #include "main/macros.h"
32 #include "main/mfeatures.h"
33 #include "main/mtypes.h"
34 #include "main/fbobject.h"
35 #include "main/framebuffer.h"
36 #include "main/renderbuffer.h"
37 #include "main/context.h"
38 #include "main/teximage.h"
39 #include "main/image.h"
41 #include "swrast/swrast.h"
42 #include "drivers/common/meta.h"
44 #include "intel_context.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_buffers.h"
47 #include "intel_blit.h"
48 #include "intel_fbo.h"
49 #include "intel_mipmap_tree.h"
50 #include "intel_regions.h"
51 #include "intel_tex.h"
52 #include "intel_span.h"
54 #include "brw_context.h"
57 #define FILE_DEBUG_FLAG DEBUG_FBO
61 intel_framebuffer_has_hiz(struct gl_framebuffer
*fb
)
63 struct intel_renderbuffer
*rb
= NULL
;
65 rb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
66 return rb
&& rb
->mt
&& rb
->mt
->hiz_mt
;
70 intel_get_rb_region(struct gl_framebuffer
*fb
, GLuint attIndex
)
72 struct intel_renderbuffer
*irb
= intel_get_renderbuffer(fb
, attIndex
);
74 return irb
->mt
->region
;
80 * Create a new framebuffer object.
82 static struct gl_framebuffer
*
83 intel_new_framebuffer(struct gl_context
* ctx
, GLuint name
)
85 /* Only drawable state in intel_framebuffer at this time, just use Mesa's
88 return _mesa_new_framebuffer(ctx
, name
);
92 /** Called by gl_renderbuffer::Delete() */
94 intel_delete_renderbuffer(struct gl_renderbuffer
*rb
)
96 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
100 intel_miptree_release(&irb
->mt
);
102 _mesa_reference_renderbuffer(&irb
->wrapped_depth
, NULL
);
103 _mesa_reference_renderbuffer(&irb
->wrapped_stencil
, NULL
);
109 * \brief Map a renderbuffer through the GTT.
111 * \see intel_map_renderbuffer()
114 intel_map_renderbuffer_gtt(struct gl_context
*ctx
,
115 struct gl_renderbuffer
*rb
,
116 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
121 struct intel_context
*intel
= intel_context(ctx
);
122 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
124 int stride
, flip_stride
;
128 irb
->map_mode
= mode
;
134 stride
= irb
->mt
->region
->pitch
* irb
->mt
->region
->cpp
;
137 y
= irb
->mt
->region
->height
- 1 - y
;
138 flip_stride
= -stride
;
142 flip_stride
= stride
;
145 if (drm_intel_bo_references(intel
->batch
.bo
, irb
->mt
->region
->bo
)) {
146 intel_batchbuffer_flush(intel
);
149 drm_intel_gem_bo_map_gtt(irb
->mt
->region
->bo
);
151 map
= irb
->mt
->region
->bo
->virtual;
152 map
+= x
* irb
->mt
->region
->cpp
;
153 map
+= (int)y
* stride
;
156 *out_stride
= flip_stride
;
158 DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
159 __FUNCTION__
, rb
->Name
, _mesa_get_format_name(rb
->Format
),
160 x
, y
, w
, h
, *out_map
, *out_stride
);
164 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
166 * On gen6+, we have LLC sharing, which means we can get high-performance
167 * access to linear-mapped buffers.
169 * This function allocates a temporary gem buffer at
170 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
171 * returns a map of that. (Note: Only X tiled buffers can be blitted).
173 * \see intel_renderbuffer::map_bo
174 * \see intel_map_renderbuffer()
177 intel_map_renderbuffer_blit(struct gl_context
*ctx
,
178 struct gl_renderbuffer
*rb
,
179 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
184 struct intel_context
*intel
= intel_context(ctx
);
185 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
190 assert(irb
->mt
->region
);
191 assert(intel
->gen
>= 6);
192 assert(!(mode
& GL_MAP_WRITE_BIT
));
193 assert(irb
->mt
->region
->tiling
== I915_TILING_X
);
195 irb
->map_mode
= mode
;
201 dst_stride
= ALIGN(w
* irb
->mt
->region
->cpp
, 4);
204 src_x
= x
+ irb
->draw_x
;
205 src_y
= y
+ irb
->draw_y
;
208 src_y
= irb
->mt
->region
->height
- y
- h
;
211 irb
->map_bo
= drm_intel_bo_alloc(intel
->bufmgr
, "MapRenderbuffer() temp",
212 dst_stride
* h
, 4096);
214 /* We don't do the flip in the blit, because it's always so tricky to get
218 intelEmitCopyBlit(intel
,
219 irb
->mt
->region
->cpp
,
220 irb
->mt
->region
->pitch
, irb
->mt
->region
->bo
,
221 0, irb
->mt
->region
->tiling
,
222 dst_stride
/ irb
->mt
->region
->cpp
, irb
->map_bo
,
228 intel_batchbuffer_flush(intel
);
229 drm_intel_bo_map(irb
->map_bo
, false);
232 *out_map
= irb
->map_bo
->virtual;
233 *out_stride
= dst_stride
;
235 *out_map
= irb
->map_bo
->virtual + (h
- 1) * dst_stride
;
236 *out_stride
= -dst_stride
;
239 DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
240 __FUNCTION__
, rb
->Name
, _mesa_get_format_name(rb
->Format
),
241 src_x
, src_y
, w
, h
, *out_map
, *out_stride
);
243 /* Fallback to GTT mapping. */
244 drm_intel_bo_unreference(irb
->map_bo
);
246 intel_map_renderbuffer_gtt(ctx
, rb
,
249 out_map
, out_stride
);
254 * \brief Map a stencil renderbuffer.
256 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
257 * the buffer in software.
259 * This function allocates a temporary malloc'd buffer at
260 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
261 * returns the temporary buffer as the map.
263 * \see intel_renderbuffer::map_buffer
264 * \see intel_map_renderbuffer()
265 * \see intel_unmap_renderbuffer_s8()
268 intel_map_renderbuffer_s8(struct gl_context
*ctx
,
269 struct gl_renderbuffer
*rb
,
270 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
275 struct intel_context
*intel
= intel_context(ctx
);
276 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
277 uint8_t *tiled_s8_map
;
278 uint8_t *untiled_s8_map
;
280 assert(rb
->Format
== MESA_FORMAT_S8
);
283 irb
->map_mode
= mode
;
289 /* Flip the Y axis for the default framebuffer. */
290 int y_flip
= (rb
->Name
== 0) ? -1 : 1;
291 int y_bias
= (rb
->Name
== 0) ? (rb
->Height
- 1) : 0;
293 irb
->map_buffer
= malloc(w
* h
);
294 untiled_s8_map
= irb
->map_buffer
;
295 tiled_s8_map
= intel_region_map(intel
, irb
->mt
->region
, mode
);
297 for (uint32_t pix_y
= 0; pix_y
< h
; pix_y
++) {
298 for (uint32_t pix_x
= 0; pix_x
< w
; pix_x
++) {
299 uint32_t flipped_y
= y_flip
* (int32_t)(y
+ pix_y
) + y_bias
;
300 ptrdiff_t offset
= intel_offset_S8(irb
->mt
->region
->pitch
,
303 untiled_s8_map
[pix_y
* w
+ pix_x
] = tiled_s8_map
[offset
];
307 *out_map
= untiled_s8_map
;
310 DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
311 __FUNCTION__
, rb
->Name
, _mesa_get_format_name(rb
->Format
),
312 x
, y
, w
, h
, *out_map
, *out_stride
);
316 * \brief Map a depthstencil buffer with separate stencil.
318 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
319 * renderbuffer and a hidden stencil renderbuffer. This function maps the
320 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
321 * returns that as the mapped pointer. The caller need not be aware of the
322 * hidden stencil buffer and may safely assume that the mapped pointer points
323 * to a MESA_FORMAT_S8_Z24 buffer
325 * The consistency between the depth buffer's S8 bits and the hidden stencil
326 * buffer is managed within intel_map_renderbuffer() and
327 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
328 * according to the map mode.
330 * \see intel_map_renderbuffer()
331 * \see intel_unmap_renderbuffer_separate_s8z24()
334 intel_map_renderbuffer_separate_s8z24(struct gl_context
*ctx
,
335 struct gl_renderbuffer
*rb
,
336 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
341 struct intel_context
*intel
= intel_context(ctx
);
342 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
345 int32_t s8z24_stride
;
347 struct intel_renderbuffer
*s8_irb
;
350 assert(rb
->Name
!= 0);
351 assert(rb
->Format
== MESA_FORMAT_S8_Z24
);
352 assert(irb
->wrapped_depth
!= NULL
);
353 assert(irb
->wrapped_stencil
!= NULL
);
355 irb
->map_mode
= mode
;
361 /* Map with write mode for the gather below. */
362 intel_map_renderbuffer_gtt(ctx
, irb
->wrapped_depth
,
363 x
, y
, w
, h
, mode
| GL_MAP_WRITE_BIT
,
364 &s8z24_map
, &s8z24_stride
);
366 s8_irb
= intel_renderbuffer(irb
->wrapped_stencil
);
367 s8_map
= intel_region_map(intel
, s8_irb
->mt
->region
, GL_MAP_READ_BIT
);
369 /* Gather the stencil buffer into the depth buffer. */
370 for (uint32_t pix_y
= 0; pix_y
< h
; ++pix_y
) {
371 for (uint32_t pix_x
= 0; pix_x
< w
; ++pix_x
) {
372 ptrdiff_t s8_offset
= intel_offset_S8(s8_irb
->mt
->region
->pitch
,
375 ptrdiff_t s8z24_offset
= pix_y
* s8z24_stride
378 s8z24_map
[s8z24_offset
] = s8_map
[s8_offset
];
382 intel_region_unmap(intel
, s8_irb
->mt
->region
);
384 *out_map
= s8z24_map
;
385 *out_stride
= s8z24_stride
;
389 * \see dd_function_table::MapRenderbuffer
392 intel_map_renderbuffer(struct gl_context
*ctx
,
393 struct gl_renderbuffer
*rb
,
394 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
399 struct intel_context
*intel
= intel_context(ctx
);
400 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
402 /* We sometimes get called with this by our intel_span.c usage. */
403 if (!irb
->mt
&& !irb
->wrapped_depth
) {
409 if (rb
->Format
== MESA_FORMAT_S8
) {
410 intel_map_renderbuffer_s8(ctx
, rb
, x
, y
, w
, h
, mode
,
411 out_map
, out_stride
);
412 } else if (irb
->wrapped_depth
) {
413 intel_map_renderbuffer_separate_s8z24(ctx
, rb
, x
, y
, w
, h
, mode
,
414 out_map
, out_stride
);
415 } else if (intel
->gen
>= 6 &&
416 !(mode
& GL_MAP_WRITE_BIT
) &&
417 irb
->mt
->region
->tiling
== I915_TILING_X
) {
418 intel_map_renderbuffer_blit(ctx
, rb
, x
, y
, w
, h
, mode
,
419 out_map
, out_stride
);
421 intel_map_renderbuffer_gtt(ctx
, rb
, x
, y
, w
, h
, mode
,
422 out_map
, out_stride
);
427 * \see intel_map_renderbuffer_s8()
430 intel_unmap_renderbuffer_s8(struct gl_context
*ctx
,
431 struct gl_renderbuffer
*rb
)
433 struct intel_context
*intel
= intel_context(ctx
);
434 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
436 DBG("%s: rb %d (%s)\n", __FUNCTION__
,
437 rb
->Name
, _mesa_get_format_name(rb
->Format
));
439 assert(rb
->Format
== MESA_FORMAT_S8
);
441 if (!irb
->map_buffer
)
444 if (irb
->map_mode
& GL_MAP_WRITE_BIT
) {
445 /* The temporary buffer was written to, so we must copy its pixels into
448 uint8_t *untiled_s8_map
= irb
->map_buffer
;
449 uint8_t *tiled_s8_map
= irb
->mt
->region
->bo
->virtual;
451 /* Flip the Y axis for the default framebuffer. */
452 int y_flip
= (rb
->Name
== 0) ? -1 : 1;
453 int y_bias
= (rb
->Name
== 0) ? (rb
->Height
- 1) : 0;
455 for (uint32_t pix_y
= 0; pix_y
< irb
->map_h
; pix_y
++) {
456 for (uint32_t pix_x
= 0; pix_x
< irb
->map_w
; pix_x
++) {
457 uint32_t flipped_y
= y_flip
* (int32_t)(pix_y
+ irb
->map_y
) + y_bias
;
458 ptrdiff_t offset
= intel_offset_S8(irb
->mt
->region
->pitch
,
461 tiled_s8_map
[offset
] =
462 untiled_s8_map
[pix_y
* irb
->map_w
+ pix_x
];
467 intel_region_unmap(intel
, irb
->mt
->region
);
468 free(irb
->map_buffer
);
469 irb
->map_buffer
= NULL
;
473 * \brief Unmap a depthstencil renderbuffer with separate stencil.
475 * \see intel_map_renderbuffer_separate_s8z24()
476 * \see intel_unmap_renderbuffer()
479 intel_unmap_renderbuffer_separate_s8z24(struct gl_context
*ctx
,
480 struct gl_renderbuffer
*rb
)
482 struct intel_context
*intel
= intel_context(ctx
);
483 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
484 struct intel_renderbuffer
*s8z24_irb
;
486 assert(rb
->Name
!= 0);
487 assert(rb
->Format
== MESA_FORMAT_S8_Z24
);
488 assert(irb
->wrapped_depth
!= NULL
);
489 assert(irb
->wrapped_stencil
!= NULL
);
491 s8z24_irb
= intel_renderbuffer(irb
->wrapped_depth
);
493 if (irb
->map_mode
& GL_MAP_WRITE_BIT
) {
494 /* Copy the stencil bits from the depth buffer into the stencil buffer.
496 uint32_t map_x
= irb
->map_x
;
497 uint32_t map_y
= irb
->map_y
;
498 uint32_t map_w
= irb
->map_w
;
499 uint32_t map_h
= irb
->map_h
;
501 struct intel_renderbuffer
*s8_irb
;
504 s8_irb
= intel_renderbuffer(irb
->wrapped_stencil
);
505 s8_map
= intel_region_map(intel
, s8_irb
->mt
->region
, GL_MAP_WRITE_BIT
);
507 int32_t s8z24_stride
= 4 * s8z24_irb
->mt
->region
->pitch
;
508 uint8_t *s8z24_map
= s8z24_irb
->mt
->region
->bo
->virtual
509 + map_y
* s8z24_stride
512 for (uint32_t pix_y
= 0; pix_y
< map_h
; ++pix_y
) {
513 for (uint32_t pix_x
= 0; pix_x
< map_w
; ++pix_x
) {
514 ptrdiff_t s8_offset
= intel_offset_S8(s8_irb
->mt
->region
->pitch
,
517 ptrdiff_t s8z24_offset
= pix_y
* s8z24_stride
520 s8_map
[s8_offset
] = s8z24_map
[s8z24_offset
];
524 intel_region_unmap(intel
, s8_irb
->mt
->region
);
527 drm_intel_gem_bo_unmap_gtt(s8z24_irb
->mt
->region
->bo
);
531 * \see dd_function_table::UnmapRenderbuffer
534 intel_unmap_renderbuffer(struct gl_context
*ctx
,
535 struct gl_renderbuffer
*rb
)
537 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
539 DBG("%s: rb %d (%s)\n", __FUNCTION__
,
540 rb
->Name
, _mesa_get_format_name(rb
->Format
));
542 if (rb
->Format
== MESA_FORMAT_S8
) {
543 intel_unmap_renderbuffer_s8(ctx
, rb
);
544 } else if (irb
->wrapped_depth
) {
545 intel_unmap_renderbuffer_separate_s8z24(ctx
, rb
);
546 } else if (irb
->map_bo
) {
547 /* Paired with intel_map_renderbuffer_blit(). */
548 drm_intel_bo_unmap(irb
->map_bo
);
549 drm_intel_bo_unreference(irb
->map_bo
);
552 /* Paired with intel_map_renderbuffer_gtt(). */
554 /* The miptree may be null when intel_map_renderbuffer() is
555 * called from intel_span.c.
557 drm_intel_gem_bo_unmap_gtt(irb
->mt
->region
->bo
);
563 * Return a pointer to a specific pixel in a renderbuffer.
566 intel_get_pointer(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
569 /* By returning NULL we force all software rendering to go through
577 * Called via glRenderbufferStorageEXT() to set the format and allocate
578 * storage for a user-created renderbuffer.
581 intel_alloc_renderbuffer_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
582 GLenum internalFormat
,
583 GLuint width
, GLuint height
)
585 struct intel_context
*intel
= intel_context(ctx
);
586 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
589 ASSERT(rb
->Name
!= 0);
591 switch (internalFormat
) {
593 /* Use the same format-choice logic as for textures.
594 * Renderbuffers aren't any different from textures for us,
595 * except they're less useful because you can't texture with
598 rb
->Format
= intel
->ctx
.Driver
.ChooseTextureFormat(ctx
, internalFormat
,
601 case GL_STENCIL_INDEX
:
602 case GL_STENCIL_INDEX1_EXT
:
603 case GL_STENCIL_INDEX4_EXT
:
604 case GL_STENCIL_INDEX8_EXT
:
605 case GL_STENCIL_INDEX16_EXT
:
606 /* These aren't actual texture formats, so force them here. */
607 if (intel
->has_separate_stencil
) {
608 rb
->Format
= MESA_FORMAT_S8
;
610 assert(!intel
->must_use_separate_stencil
);
611 rb
->Format
= MESA_FORMAT_S8_Z24
;
618 rb
->_BaseFormat
= _mesa_base_fbo_format(ctx
, internalFormat
);
619 rb
->DataType
= intel_mesa_format_to_rb_datatype(rb
->Format
);
620 cpp
= _mesa_get_format_bytes(rb
->Format
);
624 intel_miptree_release(&irb
->mt
);
626 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__
,
627 _mesa_lookup_enum_by_nr(internalFormat
),
628 _mesa_get_format_name(rb
->Format
), width
, height
);
630 tiling
= I915_TILING_NONE
;
631 if (intel
->use_texture_tiling
) {
632 GLenum base_format
= _mesa_get_format_base_format(rb
->Format
);
634 if (intel
->gen
>= 4 && (base_format
== GL_DEPTH_COMPONENT
||
635 base_format
== GL_STENCIL_INDEX
||
636 base_format
== GL_DEPTH_STENCIL
))
637 tiling
= I915_TILING_Y
;
639 tiling
= I915_TILING_X
;
642 if (irb
->Base
.Format
== MESA_FORMAT_S8
) {
644 * The stencil buffer is W tiled. However, we request from the kernel a
645 * non-tiled buffer because the GTT is incapable of W fencing.
647 * The stencil buffer has quirky pitch requirements. From Vol 2a,
648 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
649 * The pitch must be set to 2x the value computed based on width, as
650 * the stencil buffer is stored with two rows interleaved.
651 * To accomplish this, we resort to the nasty hack of doubling the drm
652 * region's cpp and halving its height.
654 * If we neglect to double the pitch, then render corruption occurs.
656 irb
->mt
= intel_miptree_create_for_renderbuffer(
662 ALIGN((height
+ 1) / 2, 64));
666 } else if (irb
->Base
.Format
== MESA_FORMAT_S8_Z24
667 && intel
->must_use_separate_stencil
) {
670 struct gl_renderbuffer
*depth_rb
;
671 struct gl_renderbuffer
*stencil_rb
;
673 depth_rb
= intel_create_wrapped_renderbuffer(ctx
, width
, height
,
675 stencil_rb
= intel_create_wrapped_renderbuffer(ctx
, width
, height
,
677 ok
= depth_rb
&& stencil_rb
;
678 ok
= ok
&& intel_alloc_renderbuffer_storage(ctx
, depth_rb
,
679 depth_rb
->InternalFormat
,
681 ok
= ok
&& intel_alloc_renderbuffer_storage(ctx
, stencil_rb
,
682 stencil_rb
->InternalFormat
,
687 intel_delete_renderbuffer(depth_rb
);
690 intel_delete_renderbuffer(stencil_rb
);
695 depth_rb
->Wrapped
= rb
;
696 stencil_rb
->Wrapped
= rb
;
697 _mesa_reference_renderbuffer(&irb
->wrapped_depth
, depth_rb
);
698 _mesa_reference_renderbuffer(&irb
->wrapped_stencil
, stencil_rb
);
701 irb
->mt
= intel_miptree_create_for_renderbuffer(intel
, rb
->Format
,
707 if (intel
->vtbl
.is_hiz_depth_format(intel
, rb
->Format
)) {
708 bool ok
= intel_miptree_alloc_hiz(intel
, irb
->mt
);
710 intel_miptree_release(&irb
->mt
);
#if FEATURE_OES_EGL_image
/**
 * Bind an EGLImage as the storage of a renderbuffer
 * (glEGLImageTargetRenderbufferStorageOES).
 *
 * NOTE(review): branch structure and local declarations reconstructed from
 * a truncated extraction — confirm against upstream intel_fbo.c.
 */
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif
768 * Called for each hardware renderbuffer when a _window_ is resized.
769 * Just update fields.
770 * Not used for user-created renderbuffers!
773 intel_alloc_window_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
774 GLenum internalFormat
, GLuint width
, GLuint height
)
776 ASSERT(rb
->Name
== 0);
779 rb
->InternalFormat
= internalFormat
;
786 intel_resize_buffers(struct gl_context
*ctx
, struct gl_framebuffer
*fb
,
787 GLuint width
, GLuint height
)
791 _mesa_resize_framebuffer(ctx
, fb
, width
, height
);
793 fb
->Initialized
= true; /* XXX remove someday */
800 /* Make sure all window system renderbuffers are up to date */
801 for (i
= BUFFER_FRONT_LEFT
; i
<= BUFFER_BACK_RIGHT
; i
++) {
802 struct gl_renderbuffer
*rb
= fb
->Attachment
[i
].Renderbuffer
;
804 /* only resize if size is changing */
805 if (rb
&& (rb
->Width
!= width
|| rb
->Height
!= height
)) {
806 rb
->AllocStorage(ctx
, rb
, rb
->InternalFormat
, width
, height
);
812 /** Dummy function for gl_renderbuffer::AllocStorage() */
814 intel_nop_alloc_storage(struct gl_context
* ctx
, struct gl_renderbuffer
*rb
,
815 GLenum internalFormat
, GLuint width
, GLuint height
)
817 _mesa_problem(ctx
, "intel_op_alloc_storage should never be called.");
822 * Create a new intel_renderbuffer which corresponds to an on-screen window,
823 * not a user-created renderbuffer.
825 struct intel_renderbuffer
*
826 intel_create_renderbuffer(gl_format format
)
828 GET_CURRENT_CONTEXT(ctx
);
830 struct intel_renderbuffer
*irb
;
832 irb
= CALLOC_STRUCT(intel_renderbuffer
);
834 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "creating renderbuffer");
838 _mesa_init_renderbuffer(&irb
->Base
, 0);
839 irb
->Base
.ClassID
= INTEL_RB_CLASS
;
840 irb
->Base
._BaseFormat
= _mesa_get_format_base_format(format
);
841 irb
->Base
.Format
= format
;
842 irb
->Base
.InternalFormat
= irb
->Base
._BaseFormat
;
843 irb
->Base
.DataType
= intel_mesa_format_to_rb_datatype(format
);
845 /* intel-specific methods */
846 irb
->Base
.Delete
= intel_delete_renderbuffer
;
847 irb
->Base
.AllocStorage
= intel_alloc_window_storage
;
848 irb
->Base
.GetPointer
= intel_get_pointer
;
854 struct gl_renderbuffer
*
855 intel_create_wrapped_renderbuffer(struct gl_context
* ctx
,
856 int width
, int height
,
860 * The name here is irrelevant, as long as its nonzero, because the
861 * renderbuffer never gets entered into Mesa's renderbuffer hash table.
865 struct intel_renderbuffer
*irb
= CALLOC_STRUCT(intel_renderbuffer
);
867 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "creating renderbuffer");
871 struct gl_renderbuffer
*rb
= &irb
->Base
;
872 _mesa_init_renderbuffer(rb
, name
);
873 rb
->ClassID
= INTEL_RB_CLASS
;
874 rb
->_BaseFormat
= _mesa_get_format_base_format(format
);
876 rb
->InternalFormat
= rb
->_BaseFormat
;
877 rb
->DataType
= intel_mesa_format_to_rb_datatype(format
);
886 * Create a new renderbuffer object.
887 * Typically called via glBindRenderbufferEXT().
889 static struct gl_renderbuffer
*
890 intel_new_renderbuffer(struct gl_context
* ctx
, GLuint name
)
892 /*struct intel_context *intel = intel_context(ctx); */
893 struct intel_renderbuffer
*irb
;
895 irb
= CALLOC_STRUCT(intel_renderbuffer
);
897 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "creating renderbuffer");
901 _mesa_init_renderbuffer(&irb
->Base
, name
);
902 irb
->Base
.ClassID
= INTEL_RB_CLASS
;
904 /* intel-specific methods */
905 irb
->Base
.Delete
= intel_delete_renderbuffer
;
906 irb
->Base
.AllocStorage
= intel_alloc_renderbuffer_storage
;
907 irb
->Base
.GetPointer
= intel_get_pointer
;
908 /* span routines set in alloc_storage function */
915 * Called via glBindFramebufferEXT().
918 intel_bind_framebuffer(struct gl_context
* ctx
, GLenum target
,
919 struct gl_framebuffer
*fb
, struct gl_framebuffer
*fbread
)
921 if (target
== GL_FRAMEBUFFER_EXT
|| target
== GL_DRAW_FRAMEBUFFER_EXT
) {
922 intel_draw_buffer(ctx
);
925 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
931 * Called via glFramebufferRenderbufferEXT().
934 intel_framebuffer_renderbuffer(struct gl_context
* ctx
,
935 struct gl_framebuffer
*fb
,
936 GLenum attachment
, struct gl_renderbuffer
*rb
)
938 DBG("Intel FramebufferRenderbuffer %u %u\n", fb
->Name
, rb
? rb
->Name
: 0);
942 _mesa_framebuffer_renderbuffer(ctx
, fb
, attachment
, rb
);
943 intel_draw_buffer(ctx
);
946 static struct intel_renderbuffer
*
947 intel_renderbuffer_wrap_miptree(struct intel_context
*intel
,
948 struct intel_mipmap_tree
*mt
,
952 GLenum internal_format
);
955 * \par Special case for separate stencil
957 * When wrapping a depthstencil texture that uses separate stencil, this
958 * function is recursively called twice: once to create \c
959 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
960 * call to create \c irb->wrapped_depth, the \c format and \c
961 * internal_format parameters do not match \c mt->format. In that case, \c
962 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
963 * MESA_FORMAT_X8_Z24.
965 * @return true on success
968 intel_renderbuffer_update_wrapper(struct intel_context
*intel
,
969 struct intel_renderbuffer
*irb
,
970 struct intel_mipmap_tree
*mt
,
974 GLenum internal_format
)
976 struct gl_renderbuffer
*rb
= &irb
->Base
;
979 if (!intel_span_supports_format(rb
->Format
)) {
980 DBG("Render to texture BAD FORMAT %s\n",
981 _mesa_get_format_name(rb
->Format
));
984 DBG("Render to texture %s\n", _mesa_get_format_name(rb
->Format
));
987 rb
->InternalFormat
= internal_format
;
988 rb
->DataType
= intel_mesa_format_to_rb_datatype(rb
->Format
);
989 rb
->_BaseFormat
= _mesa_get_format_base_format(rb
->Format
);
990 rb
->Width
= mt
->level
[level
].width
;
991 rb
->Height
= mt
->level
[level
].height
;
993 irb
->Base
.Delete
= intel_delete_renderbuffer
;
994 irb
->Base
.AllocStorage
= intel_nop_alloc_storage
;
996 intel_miptree_check_level_layer(mt
, level
, layer
);
997 irb
->mt_level
= level
;
998 irb
->mt_layer
= layer
;
1000 if (mt
->stencil_mt
&& _mesa_is_depthstencil_format(rb
->InternalFormat
)) {
1001 assert((irb
->wrapped_depth
== NULL
) == (irb
->wrapped_stencil
== NULL
));
1003 struct intel_renderbuffer
*depth_irb
;
1004 struct intel_renderbuffer
*stencil_irb
;
1006 if (!irb
->wrapped_depth
) {
1007 depth_irb
= intel_renderbuffer_wrap_miptree(intel
,
1010 GL_DEPTH_COMPONENT24
);
1011 stencil_irb
= intel_renderbuffer_wrap_miptree(intel
,
1016 _mesa_reference_renderbuffer(&irb
->wrapped_depth
, &depth_irb
->Base
);
1017 _mesa_reference_renderbuffer(&irb
->wrapped_stencil
, &stencil_irb
->Base
);
1019 if (!irb
->wrapped_depth
|| !irb
->wrapped_stencil
)
1024 depth_irb
= intel_renderbuffer(irb
->wrapped_depth
);
1025 stencil_irb
= intel_renderbuffer(irb
->wrapped_stencil
);
1027 ok
&= intel_renderbuffer_update_wrapper(intel
,
1032 GL_DEPTH_COMPONENT24
);
1033 ok
&= intel_renderbuffer_update_wrapper(intel
,
1043 intel_miptree_reference(&irb
->mt
, mt
);
1044 intel_renderbuffer_set_draw_offset(irb
);
1051 * \brief Wrap a renderbuffer around a single slice of a miptree.
1053 * Called by glFramebufferTexture*(). This just allocates a
1054 * ``struct intel_renderbuffer`` then calls
1055 * intel_renderbuffer_update_wrapper() to do the real work.
1057 * \see intel_renderbuffer_update_wrapper()
1059 static struct intel_renderbuffer
*
1060 intel_renderbuffer_wrap_miptree(struct intel_context
*intel
,
1061 struct intel_mipmap_tree
*mt
,
1065 GLenum internal_format
)
1068 const GLuint name
= ~0; /* not significant, but distinct for debugging */
1069 struct gl_context
*ctx
= &intel
->ctx
;
1070 struct intel_renderbuffer
*irb
;
1072 intel_miptree_check_level_layer(mt
, level
, layer
);
1074 irb
= CALLOC_STRUCT(intel_renderbuffer
);
1076 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "glFramebufferTexture");
1080 _mesa_init_renderbuffer(&irb
->Base
, name
);
1081 irb
->Base
.ClassID
= INTEL_RB_CLASS
;
1083 if (!intel_renderbuffer_update_wrapper(intel
, irb
,
1085 format
, internal_format
)) {
1094 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer
*irb
)
1096 unsigned int dst_x
, dst_y
;
1098 /* compute offset of the particular 2D image within the texture region */
1099 intel_miptree_get_image_offset(irb
->mt
,
1101 0, /* face, which we ignore */
1105 irb
->draw_x
= dst_x
;
1106 irb
->draw_y
= dst_y
;
1110 * Rendering to tiled buffers requires that the base address of the
1111 * buffer be aligned to a page boundary. We generally render to
1112 * textures by pointing the surface at the mipmap image level, which
1113 * may not be aligned to a tile boundary.
1115 * This function returns an appropriately-aligned base offset
1116 * according to the tiling restrictions, plus any required x/y offset
1120 intel_renderbuffer_tile_offsets(struct intel_renderbuffer
*irb
,
1124 struct intel_region
*region
= irb
->mt
->region
;
1125 int cpp
= region
->cpp
;
1126 uint32_t pitch
= region
->pitch
* cpp
;
1128 if (region
->tiling
== I915_TILING_NONE
) {
1131 return irb
->draw_x
* cpp
+ irb
->draw_y
* pitch
;
1132 } else if (region
->tiling
== I915_TILING_X
) {
1133 *tile_x
= irb
->draw_x
% (512 / cpp
);
1134 *tile_y
= irb
->draw_y
% 8;
1135 return ((irb
->draw_y
/ 8) * (8 * pitch
) +
1136 (irb
->draw_x
- *tile_x
) / (512 / cpp
) * 4096);
1138 assert(region
->tiling
== I915_TILING_Y
);
1139 *tile_x
= irb
->draw_x
% (128 / cpp
);
1140 *tile_y
= irb
->draw_y
% 32;
1141 return ((irb
->draw_y
/ 32) * (32 * pitch
) +
1142 (irb
->draw_x
- *tile_x
) / (128 / cpp
) * 4096);
1148 need_tile_offset_workaround(struct brw_context
*brw
,
1149 struct intel_renderbuffer
*irb
)
1151 uint32_t tile_x
, tile_y
;
1153 if (brw
->has_surface_tile_offset
)
1156 intel_renderbuffer_tile_offsets(irb
, &tile_x
, &tile_y
);
1158 return tile_x
!= 0 || tile_y
!= 0;
1163 * Called by glFramebufferTexture[123]DEXT() (and other places) to
1164 * prepare for rendering into texture memory. This might be called
1165 * many times to choose different texture levels, cube faces, etc
1166 * before intel_finish_render_texture() is ever called.
1169 intel_render_texture(struct gl_context
* ctx
,
1170 struct gl_framebuffer
*fb
,
1171 struct gl_renderbuffer_attachment
*att
)
1173 struct intel_context
*intel
= intel_context(ctx
);
1174 struct gl_texture_image
*image
= _mesa_get_attachment_teximage(att
);
1175 struct intel_renderbuffer
*irb
= intel_renderbuffer(att
->Renderbuffer
);
1176 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
1177 struct intel_mipmap_tree
*mt
= intel_image
->mt
;
1182 if (att
->CubeMapFace
> 0) {
1183 assert(att
->Zoffset
== 0);
1184 layer
= att
->CubeMapFace
;
1186 layer
= att
->Zoffset
;
1189 if (!intel_image
->mt
) {
1190 /* Fallback on drawing to a texture that doesn't have a miptree
1191 * (has a border, width/height 0, etc.)
1193 _mesa_reference_renderbuffer(&att
->Renderbuffer
, NULL
);
1194 _swrast_render_texture(ctx
, fb
, att
);
1198 irb
= intel_renderbuffer_wrap_miptree(intel
,
1203 image
->InternalFormat
);
1206 /* bind the wrapper to the attachment point */
1207 _mesa_reference_renderbuffer(&att
->Renderbuffer
, &irb
->Base
);
1210 /* fallback to software rendering */
1211 _swrast_render_texture(ctx
, fb
, att
);
1216 if (!intel_renderbuffer_update_wrapper(intel
, irb
,
1217 mt
, att
->TextureLevel
, layer
,
1219 image
->InternalFormat
)) {
1220 _mesa_reference_renderbuffer(&att
->Renderbuffer
, NULL
);
1221 _swrast_render_texture(ctx
, fb
, att
);
1225 DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
1227 att
->Texture
->Name
, image
->Width
, image
->Height
,
1228 irb
->Base
.RefCount
);
1230 intel_image
->used_as_render_target
= true;
1233 if (need_tile_offset_workaround(brw_context(ctx
), irb
)) {
1234 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1235 * destination in a miptree unless you actually setup your
1236 * renderbuffer as a miptree and used the fragile
1237 * lod/array_index/etc. controls to select the image. So,
1238 * instead, we just make a new single-level miptree and render
1241 struct intel_context
*intel
= intel_context(ctx
);
1242 struct intel_mipmap_tree
*new_mt
;
1243 int width
, height
, depth
;
1245 intel_miptree_get_dimensions_for_image(image
, &width
, &height
, &depth
);
1247 new_mt
= intel_miptree_create(intel
, image
->TexObject
->Target
,
1248 intel_image
->base
.Base
.TexFormat
,
1249 intel_image
->base
.Base
.Level
,
1250 intel_image
->base
.Base
.Level
,
1251 width
, height
, depth
,
1254 intel_miptree_copy_teximage(intel
, intel_image
, new_mt
);
1255 intel_renderbuffer_set_draw_offset(irb
);
1257 intel_miptree_reference(&irb
->mt
, intel_image
->mt
);
1258 intel_miptree_release(&new_mt
);
1261 /* update drawing region, etc */
1262 intel_draw_buffer(ctx
);
1267 * Called by Mesa when rendering to a texture is done.
1270 intel_finish_render_texture(struct gl_context
* ctx
,
1271 struct gl_renderbuffer_attachment
*att
)
1273 struct intel_context
*intel
= intel_context(ctx
);
1274 struct gl_texture_object
*tex_obj
= att
->Texture
;
1275 struct gl_texture_image
*image
=
1276 tex_obj
->Image
[att
->CubeMapFace
][att
->TextureLevel
];
1277 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
1279 DBG("Finish render texture tid %lx tex=%u\n",
1280 _glthread_GetID(), att
->Texture
->Name
);
1282 /* Flag that this image may now be validated into the object's miptree. */
1284 intel_image
->used_as_render_target
= false;
1286 /* Since we've (probably) rendered to the texture and will (likely) use
1287 * it in the texture domain later on in this batchbuffer, flush the
1288 * batch. Once again, we wish for a domain tracker in libdrm to cover
1289 * usage inside of a batchbuffer like GEM does in the kernel.
1291 intel_batchbuffer_emit_mi_flush(intel
);
1295 * Do additional "completeness" testing of a framebuffer object.
1298 intel_validate_framebuffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
1300 struct intel_context
*intel
= intel_context(ctx
);
1301 const struct intel_renderbuffer
*depthRb
=
1302 intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
1303 const struct intel_renderbuffer
*stencilRb
=
1304 intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
1308 * The depth and stencil renderbuffers are the same renderbuffer or wrap
1311 if (depthRb
&& stencilRb
) {
1312 bool depth_stencil_are_same
;
1313 if (depthRb
== stencilRb
)
1314 depth_stencil_are_same
= true;
1315 else if ((fb
->Attachment
[BUFFER_DEPTH
].Type
== GL_TEXTURE
) &&
1316 (fb
->Attachment
[BUFFER_STENCIL
].Type
== GL_TEXTURE
) &&
1317 (fb
->Attachment
[BUFFER_DEPTH
].Texture
->Name
==
1318 fb
->Attachment
[BUFFER_STENCIL
].Texture
->Name
))
1319 depth_stencil_are_same
= true;
1321 depth_stencil_are_same
= false;
1323 if (!intel
->has_separate_stencil
&& !depth_stencil_are_same
) {
1324 fb
->_Status
= GL_FRAMEBUFFER_UNSUPPORTED_EXT
;
1328 for (i
= 0; i
< Elements(fb
->Attachment
); i
++) {
1329 struct gl_renderbuffer
*rb
;
1330 struct intel_renderbuffer
*irb
;
1332 if (fb
->Attachment
[i
].Type
== GL_NONE
)
1335 /* A supported attachment will have a Renderbuffer set either
1336 * from being a Renderbuffer or being a texture that got the
1337 * intel_wrap_texture() treatment.
1339 rb
= fb
->Attachment
[i
].Renderbuffer
;
1341 DBG("attachment without renderbuffer\n");
1342 fb
->_Status
= GL_FRAMEBUFFER_UNSUPPORTED_EXT
;
1346 irb
= intel_renderbuffer(rb
);
1348 DBG("software rendering renderbuffer\n");
1349 fb
->_Status
= GL_FRAMEBUFFER_UNSUPPORTED_EXT
;
1353 if (!intel_span_supports_format(irb
->Base
.Format
) ||
1354 !intel
->vtbl
.render_target_supported(irb
->Base
.Format
)) {
1355 DBG("Unsupported texture/renderbuffer format attached: %s\n",
1356 _mesa_get_format_name(irb
->Base
.Format
));
1357 fb
->_Status
= GL_FRAMEBUFFER_UNSUPPORTED_EXT
;
1363 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
1364 * We can do this when the dst renderbuffer is actually a texture and
1365 * there is no scaling, mirroring or scissoring.
1367 * \return new buffer mask indicating the buffers left to blit using the
1371 intel_blit_framebuffer_copy_tex_sub_image(struct gl_context
*ctx
,
1372 GLint srcX0
, GLint srcY0
,
1373 GLint srcX1
, GLint srcY1
,
1374 GLint dstX0
, GLint dstY0
,
1375 GLint dstX1
, GLint dstY1
,
1376 GLbitfield mask
, GLenum filter
)
1378 if (mask
& GL_COLOR_BUFFER_BIT
) {
1379 const struct gl_framebuffer
*drawFb
= ctx
->DrawBuffer
;
1380 const struct gl_framebuffer
*readFb
= ctx
->ReadBuffer
;
1381 const struct gl_renderbuffer_attachment
*drawAtt
=
1382 &drawFb
->Attachment
[drawFb
->_ColorDrawBufferIndexes
[0]];
1384 /* If the source and destination are the same size with no
1385 mirroring, the rectangles are within the size of the
1386 texture and there is no scissor then we can use
1387 glCopyTexSubimage2D to implement the blit. This will end
1388 up as a fast hardware blit on some drivers */
1389 if (drawAtt
&& drawAtt
->Texture
&&
1390 srcX0
- srcX1
== dstX0
- dstX1
&&
1391 srcY0
- srcY1
== dstY0
- dstY1
&&
1394 srcX0
>= 0 && srcX1
<= readFb
->Width
&&
1395 srcY0
>= 0 && srcY1
<= readFb
->Height
&&
1396 dstX0
>= 0 && dstX1
<= drawFb
->Width
&&
1397 dstY0
>= 0 && dstY1
<= drawFb
->Height
&&
1398 !ctx
->Scissor
.Enabled
) {
1399 const struct gl_texture_object
*texObj
= drawAtt
->Texture
;
1400 const GLuint dstLevel
= drawAtt
->TextureLevel
;
1401 const GLenum target
= texObj
->Target
;
1403 struct gl_texture_image
*texImage
=
1404 _mesa_select_tex_image(ctx
, texObj
, target
, dstLevel
);
1406 if (intel_copy_texsubimage(intel_context(ctx
),
1407 intel_texture_image(texImage
),
1410 srcX1
- srcX0
, /* width */
1412 mask
&= ~GL_COLOR_BUFFER_BIT
;
1420 intel_blit_framebuffer(struct gl_context
*ctx
,
1421 GLint srcX0
, GLint srcY0
, GLint srcX1
, GLint srcY1
,
1422 GLint dstX0
, GLint dstY0
, GLint dstX1
, GLint dstY1
,
1423 GLbitfield mask
, GLenum filter
)
1425 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
1426 mask
= intel_blit_framebuffer_copy_tex_sub_image(ctx
,
1427 srcX0
, srcY0
, srcX1
, srcY1
,
1428 dstX0
, dstY0
, dstX1
, dstY1
,
1433 _mesa_meta_BlitFramebuffer(ctx
,
1434 srcX0
, srcY0
, srcX1
, srcY1
,
1435 dstX0
, dstY0
, dstX1
, dstY1
,
1440 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer
*irb
)
1443 intel_miptree_slice_set_needs_hiz_resolve(irb
->mt
,
1446 } else if (irb
->wrapped_depth
) {
1447 intel_renderbuffer_set_needs_hiz_resolve(
1448 intel_renderbuffer(irb
->wrapped_depth
));
1455 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer
*irb
)
1458 intel_miptree_slice_set_needs_depth_resolve(irb
->mt
,
1461 } else if (irb
->wrapped_depth
) {
1462 intel_renderbuffer_set_needs_depth_resolve(
1463 intel_renderbuffer(irb
->wrapped_depth
));
1470 intel_renderbuffer_resolve_hiz(struct intel_context
*intel
,
1471 struct intel_renderbuffer
*irb
)
1474 return intel_miptree_slice_resolve_hiz(intel
,
1478 if (irb
->wrapped_depth
)
1479 return intel_renderbuffer_resolve_hiz(intel
,
1480 intel_renderbuffer(irb
->wrapped_depth
));
1486 intel_renderbuffer_resolve_depth(struct intel_context
*intel
,
1487 struct intel_renderbuffer
*irb
)
1490 return intel_miptree_slice_resolve_depth(intel
,
1495 if (irb
->wrapped_depth
)
1496 return intel_renderbuffer_resolve_depth(intel
,
1497 intel_renderbuffer(irb
->wrapped_depth
));
1503 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1504 * Hook in device driver functions.
1507 intel_fbo_init(struct intel_context
*intel
)
1509 intel
->ctx
.Driver
.NewFramebuffer
= intel_new_framebuffer
;
1510 intel
->ctx
.Driver
.NewRenderbuffer
= intel_new_renderbuffer
;
1511 intel
->ctx
.Driver
.MapRenderbuffer
= intel_map_renderbuffer
;
1512 intel
->ctx
.Driver
.UnmapRenderbuffer
= intel_unmap_renderbuffer
;
1513 intel
->ctx
.Driver
.BindFramebuffer
= intel_bind_framebuffer
;
1514 intel
->ctx
.Driver
.FramebufferRenderbuffer
= intel_framebuffer_renderbuffer
;
1515 intel
->ctx
.Driver
.RenderTexture
= intel_render_texture
;
1516 intel
->ctx
.Driver
.FinishRenderTexture
= intel_finish_render_texture
;
1517 intel
->ctx
.Driver
.ResizeBuffers
= intel_resize_buffers
;
1518 intel
->ctx
.Driver
.ValidateFramebuffer
= intel_validate_framebuffer
;
1519 intel
->ctx
.Driver
.BlitFramebuffer
= intel_blit_framebuffer
;
1521 #if FEATURE_OES_EGL_image
1522 intel
->ctx
.Driver
.EGLImageTargetRenderbufferStorage
=
1523 intel_image_target_renderbuffer_storage
;