a53985b189354d9441fc1636268921ced627f992
[mesa.git] / src / mesa / drivers / dri / intel / intel_fbo.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/enums.h"
30 #include "main/imports.h"
31 #include "main/macros.h"
32 #include "main/mfeatures.h"
33 #include "main/mtypes.h"
34 #include "main/fbobject.h"
35 #include "main/framebuffer.h"
36 #include "main/renderbuffer.h"
37 #include "main/context.h"
38 #include "main/teximage.h"
39 #include "main/image.h"
40
41 #include "swrast/swrast.h"
42 #include "drivers/common/meta.h"
43
44 #include "intel_context.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_buffers.h"
47 #include "intel_blit.h"
48 #include "intel_fbo.h"
49 #include "intel_mipmap_tree.h"
50 #include "intel_regions.h"
51 #include "intel_tex.h"
52 #include "intel_span.h"
53 #ifndef I915
54 #include "brw_context.h"
55 #endif
56
57 #define FILE_DEBUG_FLAG DEBUG_FBO
58
59 static struct gl_renderbuffer *
60 intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
61
62 struct intel_region*
63 intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
64 {
65 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
66 if (irb && irb->mt) {
67 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
68 return irb->mt->stencil_mt->region;
69 else
70 return irb->mt->region;
71 } else
72 return NULL;
73 }
74
75 /**
76 * Create a new framebuffer object.
77 */
78 static struct gl_framebuffer *
79 intel_new_framebuffer(struct gl_context * ctx, GLuint name)
80 {
81 /* Only drawable state in intel_framebuffer at this time, just use Mesa's
82 * class
83 */
84 return _mesa_new_framebuffer(ctx, name);
85 }
86
87
88 /** Called by gl_renderbuffer::Delete() */
89 static void
90 intel_delete_renderbuffer(struct gl_renderbuffer *rb)
91 {
92 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
93
94 ASSERT(irb);
95
96 intel_miptree_release(&irb->mt);
97
98 free(irb);
99 }
100
/**
 * \see dd_function_table::MapRenderbuffer
 *
 * Returns, via \c out_map and \c out_stride, a CPU-visible pointer to the
 * requested x/y/w/h rectangle of the renderbuffer and the byte stride
 * between rows.  For window-system buffers the stride is negative so the
 * caller sees a top-to-bottom image.
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
		     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      /* Point at the last row and walk backwards via a negated stride.
       * NOTE(review): arithmetic on a void* relies on the GNU C extension,
       * which this driver already assumes elsewhere.
       */
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
157
/**
 * \see dd_function_table::UnmapRenderbuffer
 *
 * Releases a mapping established by intel_map_renderbuffer().
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
			 struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   /* Undo the intel_miptree_map() done in intel_map_renderbuffer(). */
   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}
180
181
182 /**
183 * Round up the requested multisample count to the next supported sample size.
184 */
185 static unsigned
186 quantize_num_samples(struct intel_context *intel, unsigned num_samples)
187 {
188 switch (intel->gen) {
189 case 6:
190 /* Gen6 supports only 4x multisampling. */
191 if (num_samples > 0)
192 return 4;
193 else
194 return 0;
195 case 7:
196 /* Gen7 supports 4x and 8x multisampling. */
197 if (num_samples > 4)
198 return 8;
199 else if (num_samples > 0)
200 return 4;
201 else
202 return 0;
203 return 0;
204 default:
205 /* MSAA unsupported */
206 return 0;
207 }
208 }
209
210
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 *
 * \return true on success (including zero-sized storage, which needs no
 *         buffer), false if miptree/HiZ/MCS allocation fails.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   /* Clamp the requested sample count to what this generation supports. */
   rb->NumSamples = quantize_num_samples(intel, rb->NumSamples);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
							 GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
	 rb->Format = MESA_FORMAT_S8;
      } else {
	 assert(!intel->must_use_separate_stencil);
	 rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   /* Any previously allocated storage is replaced wholesale. */
   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   /* Zero-sized renderbuffers need no backing buffer. */
   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
						   width, height,
						   rb->NumSamples);
   if (!irb->mt)
      return false;

   /* Depth formats that support it also get a hierarchical-Z buffer. */
   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt, rb->NumSamples);
      if (!ok) {
	 intel_miptree_release(&irb->mt);
	 return false;
      }
   }

   /* Compressed multisample layouts additionally need an MCS buffer. */
   if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      bool ok = intel_miptree_alloc_mcs(intel, irb->mt, rb->NumSamples);
      if (!ok) {
	 intel_miptree_release(&irb->mt);
	 return false;
      }
   }

   return true;
}
286
287
288 #if FEATURE_OES_EGL_image
289 static void
290 intel_image_target_renderbuffer_storage(struct gl_context *ctx,
291 struct gl_renderbuffer *rb,
292 void *image_handle)
293 {
294 struct intel_context *intel = intel_context(ctx);
295 struct intel_renderbuffer *irb;
296 __DRIscreen *screen;
297 __DRIimage *image;
298
299 screen = intel->intelScreen->driScrnPriv;
300 image = screen->dri2.image->lookupEGLImage(screen, image_handle,
301 screen->loaderPrivate);
302 if (image == NULL)
303 return;
304
305 /* __DRIimage is opaque to the core so it has to be checked here */
306 switch (image->format) {
307 case MESA_FORMAT_RGBA8888_REV:
308 _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
309 "glEGLImageTargetRenderbufferStorage(unsupported image format");
310 return;
311 break;
312 default:
313 break;
314 }
315
316 irb = intel_renderbuffer(rb);
317 intel_miptree_release(&irb->mt);
318 irb->mt = intel_miptree_create_for_region(intel,
319 GL_TEXTURE_2D,
320 image->format,
321 image->region);
322 if (!irb->mt)
323 return;
324
325 rb->InternalFormat = image->internal_format;
326 rb->Width = image->region->width;
327 rb->Height = image->region->height;
328 rb->Format = image->format;
329 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
330 image->internal_format);
331 }
332 #endif
333
334 /**
335 * Called for each hardware renderbuffer when a _window_ is resized.
336 * Just update fields.
337 * Not used for user-created renderbuffers!
338 */
339 static GLboolean
340 intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
341 GLenum internalFormat, GLuint width, GLuint height)
342 {
343 ASSERT(rb->Name == 0);
344 rb->Width = width;
345 rb->Height = height;
346 rb->InternalFormat = internalFormat;
347
348 return true;
349 }
350
351
352 static void
353 intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
354 GLuint width, GLuint height)
355 {
356 int i;
357
358 _mesa_resize_framebuffer(ctx, fb, width, height);
359
360 fb->Initialized = true; /* XXX remove someday */
361
362 if (_mesa_is_user_fbo(fb)) {
363 return;
364 }
365
366
367 /* Make sure all window system renderbuffers are up to date */
368 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
369 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
370
371 /* only resize if size is changing */
372 if (rb && (rb->Width != width || rb->Height != height)) {
373 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
374 }
375 }
376 }
377
378
379 /** Dummy function for gl_renderbuffer::AllocStorage() */
380 static GLboolean
381 intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
382 GLenum internalFormat, GLuint width, GLuint height)
383 {
384 _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
385 return false;
386 }
387
388 /**
389 * Create a new intel_renderbuffer which corresponds to an on-screen window,
390 * not a user-created renderbuffer.
391 */
392 struct intel_renderbuffer *
393 intel_create_renderbuffer(gl_format format)
394 {
395 struct intel_renderbuffer *irb;
396 struct gl_renderbuffer *rb;
397
398 GET_CURRENT_CONTEXT(ctx);
399
400 irb = CALLOC_STRUCT(intel_renderbuffer);
401 if (!irb) {
402 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
403 return NULL;
404 }
405
406 rb = &irb->Base.Base;
407
408 _mesa_init_renderbuffer(rb, 0);
409 rb->ClassID = INTEL_RB_CLASS;
410 rb->_BaseFormat = _mesa_get_format_base_format(format);
411 rb->Format = format;
412 rb->InternalFormat = rb->_BaseFormat;
413
414 /* intel-specific methods */
415 rb->Delete = intel_delete_renderbuffer;
416 rb->AllocStorage = intel_alloc_window_storage;
417
418 return irb;
419 }
420
421 /**
422 * Private window-system buffers (as opposed to ones shared with the display
423 * server created with intel_create_renderbuffer()) are most similar in their
424 * handling to user-created renderbuffers, but they have a resize handler that
425 * may be called at intel_update_renderbuffers() time.
426 */
427 struct intel_renderbuffer *
428 intel_create_private_renderbuffer(gl_format format)
429 {
430 struct intel_renderbuffer *irb;
431
432 irb = intel_create_renderbuffer(format);
433 irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
434
435 return irb;
436 }
437
438 /**
439 * Create a new renderbuffer object.
440 * Typically called via glBindRenderbufferEXT().
441 */
442 static struct gl_renderbuffer *
443 intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
444 {
445 /*struct intel_context *intel = intel_context(ctx); */
446 struct intel_renderbuffer *irb;
447 struct gl_renderbuffer *rb;
448
449 irb = CALLOC_STRUCT(intel_renderbuffer);
450 if (!irb) {
451 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
452 return NULL;
453 }
454
455 rb = &irb->Base.Base;
456
457 _mesa_init_renderbuffer(rb, name);
458 rb->ClassID = INTEL_RB_CLASS;
459
460 /* intel-specific methods */
461 rb->Delete = intel_delete_renderbuffer;
462 rb->AllocStorage = intel_alloc_renderbuffer_storage;
463 /* span routines set in alloc_storage function */
464
465 return rb;
466 }
467
468
469 /**
470 * Called via glBindFramebufferEXT().
471 */
472 static void
473 intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
474 struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
475 {
476 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
477 intel_draw_buffer(ctx);
478 }
479 else {
480 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
481 }
482 }
483
484
485 /**
486 * Called via glFramebufferRenderbufferEXT().
487 */
488 static void
489 intel_framebuffer_renderbuffer(struct gl_context * ctx,
490 struct gl_framebuffer *fb,
491 GLenum attachment, struct gl_renderbuffer *rb)
492 {
493 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
494
495 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
496 intel_draw_buffer(ctx);
497 }
498
/**
 * Point the renderbuffer wrapper \c irb at the texture image \c image so
 * the image can be used as a render target.
 *
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format. In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * \param layer  cube face or array slice within the miptree level
 *
 * @return true on success
 */

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
				  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   /* Mirror the texture image's format and size onto the renderbuffer. */
   rb->Format = image->TexFormat;
   rb->InternalFormat = image->InternalFormat;
   rb->_BaseFormat = image->_BaseFormat;
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   rb->Delete = intel_delete_renderbuffer;
   /* Storage comes from the texture's miptree, so AllocStorage must never run. */
   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   /* Share the texture's miptree (takes a reference). */
   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   /* Lazily allocate a HiZ buffer for depth formats that support it. */
   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
      if (!mt->hiz_mt)
	 return false;
   }

   return true;
}
550
551 void
552 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
553 {
554 unsigned int dst_x, dst_y;
555
556 /* compute offset of the particular 2D image within the texture region */
557 intel_miptree_get_image_offset(irb->mt,
558 irb->mt_level,
559 0, /* face, which we ignore */
560 irb->mt_layer,
561 &dst_x, &dst_y);
562
563 irb->draw_x = dst_x;
564 irb->draw_y = dst_y;
565 }
566
567 /**
568 * Rendering to tiled buffers requires that the base address of the
569 * buffer be aligned to a page boundary. We generally render to
570 * textures by pointing the surface at the mipmap image level, which
571 * may not be aligned to a tile boundary.
572 *
573 * This function returns an appropriately-aligned base offset
574 * according to the tiling restrictions, plus any required x/y offset
575 * from there.
576 */
577 uint32_t
578 intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
579 uint32_t *tile_x,
580 uint32_t *tile_y)
581 {
582 struct intel_region *region = irb->mt->region;
583 uint32_t mask_x, mask_y;
584
585 intel_region_get_tile_masks(region, &mask_x, &mask_y);
586
587 *tile_x = irb->draw_x & mask_x;
588 *tile_y = irb->draw_y & mask_y;
589 return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
590 irb->draw_y & ~mask_y);
591 }
592
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 *
 * Falls back to swrast rendering when the texture has no miptree or when
 * a wrapper renderbuffer cannot be created/updated.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   /* Cube faces and array slices are both addressed as "layer" below. */
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

      /* Create a wrapper renderbuffer for the texture image (~0 = unnamed). */
      irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   irb->tex_image = image;

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.Base.RefCount);

   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}
660
661
/**
 * Called by Mesa when rendering to a texture is done.
 *
 * Clears the wrapper's back-pointer to the texture image and flushes the
 * batchbuffer so subsequent texturing sees the rendered results.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* irb may be NULL if the attachment fell back to swrast rendering. */
   if (irb)
      irb->tex_image = NULL;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
688
689 /**
690 * Do additional "completeness" testing of a framebuffer object.
691 */
692 static void
693 intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
694 {
695 struct intel_context *intel = intel_context(ctx);
696 const struct intel_renderbuffer *depthRb =
697 intel_get_renderbuffer(fb, BUFFER_DEPTH);
698 const struct intel_renderbuffer *stencilRb =
699 intel_get_renderbuffer(fb, BUFFER_STENCIL);
700 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
701 int i;
702
703 DBG("%s() on fb %p (%s)\n", __FUNCTION__,
704 fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
705 (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
706
707 if (depthRb)
708 depth_mt = depthRb->mt;
709 if (stencilRb) {
710 stencil_mt = stencilRb->mt;
711 if (stencil_mt->stencil_mt)
712 stencil_mt = stencil_mt->stencil_mt;
713 }
714
715 if (depth_mt && stencil_mt) {
716 if (depth_mt == stencil_mt) {
717 /* For true packed depth/stencil (not faked on prefers-separate-stencil
718 * hardware) we need to be sure they're the same level/layer, since
719 * we'll be emitting a single packet describing the packed setup.
720 */
721 if (depthRb->mt_level != stencilRb->mt_level ||
722 depthRb->mt_layer != stencilRb->mt_layer) {
723 DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
724 depthRb->mt_level,
725 depthRb->mt_layer,
726 stencilRb->mt_level,
727 stencilRb->mt_layer);
728 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
729 }
730 } else {
731 if (!intel->has_separate_stencil) {
732 DBG("separate stencil unsupported\n");
733 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
734 }
735 if (stencil_mt->format != MESA_FORMAT_S8) {
736 DBG("separate stencil is %s instead of S8\n",
737 _mesa_get_format_name(stencil_mt->format));
738 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
739 }
740 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
741 /* Before Gen7, separate depth and stencil buffers can be used
742 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
743 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
744 * [DevSNB]: This field must be set to the same value (enabled
745 * or disabled) as Hierarchical Depth Buffer Enable.
746 */
747 DBG("separate stencil without HiZ\n");
748 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
749 }
750 }
751 }
752
753 for (i = 0; i < Elements(fb->Attachment); i++) {
754 struct gl_renderbuffer *rb;
755 struct intel_renderbuffer *irb;
756
757 if (fb->Attachment[i].Type == GL_NONE)
758 continue;
759
760 /* A supported attachment will have a Renderbuffer set either
761 * from being a Renderbuffer or being a texture that got the
762 * intel_wrap_texture() treatment.
763 */
764 rb = fb->Attachment[i].Renderbuffer;
765 if (rb == NULL) {
766 DBG("attachment without renderbuffer\n");
767 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
768 continue;
769 }
770
771 if (fb->Attachment[i].Type == GL_TEXTURE) {
772 const struct gl_texture_image *img =
773 _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
774
775 if (img->Border) {
776 DBG("texture with border\n");
777 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
778 continue;
779 }
780 }
781
782 irb = intel_renderbuffer(rb);
783 if (irb == NULL) {
784 DBG("software rendering renderbuffer\n");
785 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
786 continue;
787 }
788
789 if (!intel->vtbl.render_target_supported(intel, rb)) {
790 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
791 _mesa_get_format_name(intel_rb_format(irb)));
792 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
793 }
794 }
795 }
796
/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
      struct intel_renderbuffer *srcRb =
         intel_renderbuffer(readFb->_ColorReadBuffer);

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers */
      if (srcRb && drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         /* On success, drop the color bit from the remaining-work mask. */
         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcRb,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}
856
/**
 * Driver BlitFramebuffer hook: try progressively slower paths until the
 * whole \c mask has been handled.
 */
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

#ifndef I915
   /* Gen6+ builds: let BLORP handle whatever buffers remain. */
   mask = brw_blorp_framebuffer(intel_context(ctx),
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;
#endif

   /* Last resort: the meta-ops rendering path. */
   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}
885
886 void
887 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
888 {
889 if (irb->mt) {
890 intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
891 irb->mt_level,
892 irb->mt_layer);
893 }
894 }
895
896 void
897 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
898 {
899 if (irb->mt) {
900 intel_miptree_slice_set_needs_depth_resolve(irb->mt,
901 irb->mt_level,
902 irb->mt_layer);
903 }
904 }
905
906 bool
907 intel_renderbuffer_resolve_hiz(struct intel_context *intel,
908 struct intel_renderbuffer *irb)
909 {
910 if (irb->mt)
911 return intel_miptree_slice_resolve_hiz(intel,
912 irb->mt,
913 irb->mt_level,
914 irb->mt_layer);
915
916 return false;
917 }
918
919 bool
920 intel_renderbuffer_resolve_depth(struct intel_context *intel,
921 struct intel_renderbuffer *irb)
922 {
923 if (irb->mt)
924 return intel_miptree_slice_resolve_depth(intel,
925 irb->mt,
926 irb->mt_level,
927 irb->mt_layer);
928
929 return false;
930 }
931
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   /* Wire up the FBO-related driver hooks defined in this file. */
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   /* Only hooked up when EGLImage support is compiled in. */
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}
955 }