i965: Add hardware context support.
[mesa.git] / src / mesa / drivers / dri / intel / intel_fbo.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/enums.h"
30 #include "main/imports.h"
31 #include "main/macros.h"
32 #include "main/mfeatures.h"
33 #include "main/mtypes.h"
34 #include "main/fbobject.h"
35 #include "main/framebuffer.h"
36 #include "main/renderbuffer.h"
37 #include "main/context.h"
38 #include "main/teximage.h"
39 #include "main/image.h"
40
41 #include "swrast/swrast.h"
42 #include "drivers/common/meta.h"
43
44 #include "intel_context.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_buffers.h"
47 #include "intel_blit.h"
48 #include "intel_fbo.h"
49 #include "intel_mipmap_tree.h"
50 #include "intel_regions.h"
51 #include "intel_tex.h"
52 #include "intel_span.h"
53 #ifndef I915
54 #include "brw_context.h"
55 #endif
56
57 #define FILE_DEBUG_FLAG DEBUG_FBO
58
59 static struct gl_renderbuffer *
60 intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
61
62 bool
63 intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
64 {
65 struct intel_renderbuffer *rb = NULL;
66 if (fb)
67 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
68 return rb && rb->mt && rb->mt->hiz_mt;
69 }
70
71 struct intel_region*
72 intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
73 {
74 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
75 if (irb && irb->mt) {
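      /* When the miptree fakes packed depth/stencil using a separate stencil
       * child (stencil_mt), the stencil attachment's bits live in that child,
       * so hand back the child's region rather than the parent's.
       */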
76 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
77 return irb->mt->stencil_mt->region;
78 else
79 return irb->mt->region;
80 } else
81 return NULL;
82 }
83
84 /**
85 * Create a new framebuffer object.
86 */
87 static struct gl_framebuffer *
88 intel_new_framebuffer(struct gl_context * ctx, GLuint name)
89 {
90    /* There is only drawable state in intel_framebuffer at this time, so just
91     * use Mesa's class.
92     */
93 return _mesa_new_framebuffer(ctx, name);
94 }
95
96
97 /** Called by gl_renderbuffer::Delete() */
98 static void
99 intel_delete_renderbuffer(struct gl_renderbuffer *rb)
100 {
101 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
102
103 ASSERT(irb);
104
105 intel_miptree_release(&irb->mt);
106
107 free(irb);
108 }
109
110 /**
111 * \see dd_function_table::MapRenderbuffer
112 */
113 static void
114 intel_map_renderbuffer(struct gl_context *ctx,
115 struct gl_renderbuffer *rb,
116 GLuint x, GLuint y, GLuint w, GLuint h,
117 GLbitfield mode,
118 GLubyte **out_map,
119 GLint *out_stride)
120 {
121 struct intel_context *intel = intel_context(ctx);
122 struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
123 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
124 void *map;
125 int stride;
126
127 if (srb->Buffer) {
128 /* this is a malloc'd renderbuffer (accum buffer), not an irb */
129 GLint bpp = _mesa_get_format_bytes(rb->Format);
130 GLint rowStride = srb->RowStride;
131 *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
132 *out_stride = rowStride;
133 return;
134 }
135
136    /* We sometimes get called on a renderbuffer with no miptree by our intel_span.c usage. */
137 if (!irb->mt) {
138 *out_map = NULL;
139 *out_stride = 0;
140 return;
141 }
142
143 /* For a window-system renderbuffer, we need to flip the mapping we receive
144     * upside-down.  So we ask for the rectangle flipped vertically, and then
145     * return a pointer to the bottom of it with a negative stride.
146 */
147 if (rb->Name == 0) {
148 y = rb->Height - y - h;
149 }
150
151 intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
152 x, y, w, h, mode, &map, &stride);
153
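   /* Point at the bottom row of the mapped rectangle and negate the stride,
    * completing the vertical flip described above.
    */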
154 if (rb->Name == 0) {
155 map += (h - 1) * stride;
156 stride = -stride;
157 }
158
159 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
160 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
161 x, y, w, h, map, stride);
162
163 *out_map = map;
164 *out_stride = stride;
165 }
166
167 /**
168 * \see dd_function_table::UnmapRenderbuffer
169 */
170 static void
171 intel_unmap_renderbuffer(struct gl_context *ctx,
172 struct gl_renderbuffer *rb)
173 {
174 struct intel_context *intel = intel_context(ctx);
175 struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
176 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
177
178 DBG("%s: rb %d (%s)\n", __FUNCTION__,
179 rb->Name, _mesa_get_format_name(rb->Format));
180
181 if (srb->Buffer) {
182 /* this is a malloc'd renderbuffer (accum buffer) */
183 /* nothing to do */
184 return;
185 }
186
187 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
188 }
189
190
191 /**
192 * Round up the requested multisample count to the next supported sample size.
193 */
194 static unsigned
195 quantize_num_samples(struct intel_context *intel, unsigned num_samples)
196 {
197 switch (intel->gen) {
198 case 6:
199 /* Gen6 supports only 4x multisampling. */
200 if (num_samples > 0)
201 return 4;
202 else
203 return 0;
204 case 7:
205 /* TODO: Gen7 supports only 4x multisampling at the moment. */
206 if (num_samples > 0)
207 return 4;
208 else
209 return 0;
211 default:
212 /* MSAA unsupported */
213 return 0;
214 }
215 }
216
217
218 /**
219 * Called via glRenderbufferStorageEXT() to set the format and allocate
220 * storage for a user-created renderbuffer.
221 */
222 GLboolean
223 intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
224 GLenum internalFormat,
225 GLuint width, GLuint height)
226 {
227 struct intel_context *intel = intel_context(ctx);
228 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
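
   /* Clamp the requested sample count to something the hardware supports
    * before it is used to size the miptree below.
    */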
229 rb->NumSamples = quantize_num_samples(intel, rb->NumSamples);
230
231 ASSERT(rb->Name != 0);
232
233 switch (internalFormat) {
234 default:
235 /* Use the same format-choice logic as for textures.
236 * Renderbuffers aren't any different from textures for us,
237 * except they're less useful because you can't texture with
238 * them.
239 */
240 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
241 GL_NONE, GL_NONE);
242 break;
243 case GL_STENCIL_INDEX:
244 case GL_STENCIL_INDEX1_EXT:
245 case GL_STENCIL_INDEX4_EXT:
246 case GL_STENCIL_INDEX8_EXT:
247 case GL_STENCIL_INDEX16_EXT:
248 /* These aren't actual texture formats, so force them here. */
249 if (intel->has_separate_stencil) {
250 rb->Format = MESA_FORMAT_S8;
251 } else {
252 assert(!intel->must_use_separate_stencil);
253 rb->Format = MESA_FORMAT_S8_Z24;
254 }
255 break;
256 }
257
258 rb->Width = width;
259 rb->Height = height;
260 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
261
262 intel_miptree_release(&irb->mt);
263
264 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
265 _mesa_lookup_enum_by_nr(internalFormat),
266 _mesa_get_format_name(rb->Format), width, height);
267
268 if (width == 0 || height == 0)
269 return true;
270
271 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
272 width, height,
273 rb->NumSamples);
274 if (!irb->mt)
275 return false;
276
277 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
278 bool ok = intel_miptree_alloc_hiz(intel, irb->mt, rb->NumSamples);
279 if (!ok) {
280 intel_miptree_release(&irb->mt);
281 return false;
282 }
283 }
284
285 return true;
286 }
287
288
289 #if FEATURE_OES_EGL_image
290 static void
291 intel_image_target_renderbuffer_storage(struct gl_context *ctx,
292 struct gl_renderbuffer *rb,
293 void *image_handle)
294 {
295 struct intel_context *intel = intel_context(ctx);
296 struct intel_renderbuffer *irb;
297 __DRIscreen *screen;
298 __DRIimage *image;
299
300 screen = intel->intelScreen->driScrnPriv;
301 image = screen->dri2.image->lookupEGLImage(screen, image_handle,
302 screen->loaderPrivate);
303 if (image == NULL)
304 return;
305
306 /* __DRIimage is opaque to the core so it has to be checked here */
307 switch (image->format) {
308 case MESA_FORMAT_RGBA8888_REV:
309 _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
310             "glEGLImageTargetRenderbufferStorage(unsupported image format)");
311       return;
313 default:
314 break;
315 }
316
317 irb = intel_renderbuffer(rb);
318 intel_miptree_release(&irb->mt);
319 irb->mt = intel_miptree_create_for_region(intel,
320 GL_TEXTURE_2D,
321 image->format,
322 image->region);
323 if (!irb->mt)
324 return;
325
326 rb->InternalFormat = image->internal_format;
327 rb->Width = image->region->width;
328 rb->Height = image->region->height;
329 rb->Format = image->format;
330 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
331 image->internal_format);
332 }
333 #endif
334
335 /**
336 * Called for each hardware renderbuffer when a _window_ is resized.
337 * Just update fields.
338 * Not used for user-created renderbuffers!
339 */
340 static GLboolean
341 intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
342 GLenum internalFormat, GLuint width, GLuint height)
343 {
344 ASSERT(rb->Name == 0);
345 rb->Width = width;
346 rb->Height = height;
347 rb->InternalFormat = internalFormat;
348
349 return true;
350 }
351
352
353 static void
354 intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
355 GLuint width, GLuint height)
356 {
357 int i;
358
359 _mesa_resize_framebuffer(ctx, fb, width, height);
360
361 fb->Initialized = true; /* XXX remove someday */
362
363 if (fb->Name != 0) {
364 return;
365 }
366
367
368 /* Make sure all window system renderbuffers are up to date */
369 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
370 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
371
372 /* only resize if size is changing */
373 if (rb && (rb->Width != width || rb->Height != height)) {
374 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
375 }
376 }
377 }
378
379
380 /** Dummy function for gl_renderbuffer::AllocStorage() */
381 static GLboolean
382 intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
383 GLenum internalFormat, GLuint width, GLuint height)
384 {
385    _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
386 return false;
387 }
388
389 /**
390 * Create a new intel_renderbuffer which corresponds to an on-screen window,
391 * not a user-created renderbuffer.
392 */
393 struct intel_renderbuffer *
394 intel_create_renderbuffer(gl_format format)
395 {
396 struct intel_renderbuffer *irb;
397 struct gl_renderbuffer *rb;
398
399 GET_CURRENT_CONTEXT(ctx);
400
401 irb = CALLOC_STRUCT(intel_renderbuffer);
402 if (!irb) {
403 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
404 return NULL;
405 }
406
407 rb = &irb->Base.Base;
408
409 _mesa_init_renderbuffer(rb, 0);
410 rb->ClassID = INTEL_RB_CLASS;
411 rb->_BaseFormat = _mesa_get_format_base_format(format);
412 rb->Format = format;
413 rb->InternalFormat = rb->_BaseFormat;
414
415 /* intel-specific methods */
416 rb->Delete = intel_delete_renderbuffer;
417 rb->AllocStorage = intel_alloc_window_storage;
418
419 return irb;
420 }
421
422 /**
423 * Create a new renderbuffer object.
424 * Typically called via glBindRenderbufferEXT().
425 */
426 static struct gl_renderbuffer *
427 intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
428 {
429 /*struct intel_context *intel = intel_context(ctx); */
430 struct intel_renderbuffer *irb;
431 struct gl_renderbuffer *rb;
432
433 irb = CALLOC_STRUCT(intel_renderbuffer);
434 if (!irb) {
435 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
436 return NULL;
437 }
438
439 rb = &irb->Base.Base;
440
441 _mesa_init_renderbuffer(rb, name);
442 rb->ClassID = INTEL_RB_CLASS;
443
444 /* intel-specific methods */
445 rb->Delete = intel_delete_renderbuffer;
446 rb->AllocStorage = intel_alloc_renderbuffer_storage;
447 /* span routines set in alloc_storage function */
448
449 return rb;
450 }
451
452
453 /**
454 * Called via glBindFramebufferEXT().
455 */
456 static void
457 intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
458 struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
459 {
460 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
461 intel_draw_buffer(ctx);
462 }
463 else {
464 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
465 }
466 }
467
468
469 /**
470 * Called via glFramebufferRenderbufferEXT().
471 */
472 static void
473 intel_framebuffer_renderbuffer(struct gl_context * ctx,
474 struct gl_framebuffer *fb,
475 GLenum attachment, struct gl_renderbuffer *rb)
476 {
477 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
478
479 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
480 intel_draw_buffer(ctx);
481 }
482
483 /**
484 * \par Special case for separate stencil
485 *
486 * When wrapping a depthstencil texture that uses separate stencil, this
487 * function is recursively called twice: once to create \c
488 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
489 * call to create \c irb->wrapped_depth, the \c format and \c
490 * internal_format parameters do not match \c mt->format. In that case, \c
491 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
492 * MESA_FORMAT_X8_Z24.
493 *
494  * \return true on success
495 */
496
497 static bool
498 intel_renderbuffer_update_wrapper(struct intel_context *intel,
499 struct intel_renderbuffer *irb,
500 struct gl_texture_image *image,
501 uint32_t layer)
502 {
503 struct gl_renderbuffer *rb = &irb->Base.Base;
504 struct intel_texture_image *intel_image = intel_texture_image(image);
505 struct intel_mipmap_tree *mt = intel_image->mt;
506 int level = image->Level;
507
508 rb->Format = image->TexFormat;
509 rb->InternalFormat = image->InternalFormat;
510 rb->_BaseFormat = image->_BaseFormat;
511 rb->Width = mt->level[level].width;
512 rb->Height = mt->level[level].height;
513
514 rb->Delete = intel_delete_renderbuffer;
515 rb->AllocStorage = intel_nop_alloc_storage;
516
517 intel_miptree_check_level_layer(mt, level, layer);
518 irb->mt_level = level;
519 irb->mt_layer = layer;
520
521 intel_miptree_reference(&irb->mt, mt);
522
523 intel_renderbuffer_set_draw_offset(irb);
524
525 if (mt->hiz_mt == NULL &&
526 intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
527 intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
528 if (!mt->hiz_mt)
529 return false;
530 }
531
532 return true;
533 }
534
535 void
536 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
537 {
538 unsigned int dst_x, dst_y;
539
540 /* compute offset of the particular 2D image within the texture region */
541 intel_miptree_get_image_offset(irb->mt,
542 irb->mt_level,
543 0, /* face, which we ignore */
544 irb->mt_layer,
545 &dst_x, &dst_y);
546
547 irb->draw_x = dst_x;
548 irb->draw_y = dst_y;
549 }
550
551 /**
552 * Rendering to tiled buffers requires that the base address of the
553 * buffer be aligned to a page boundary. We generally render to
554 * textures by pointing the surface at the mipmap image level, which
555 * may not be aligned to a tile boundary.
556 *
557 * This function returns an appropriately-aligned base offset
558 * according to the tiling restrictions, plus any required x/y offset
559 * from there.
560 */
561 uint32_t
562 intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
563 uint32_t *tile_x,
564 uint32_t *tile_y)
565 {
566 struct intel_region *region = irb->mt->region;
567 uint32_t mask_x, mask_y;
568
569 intel_region_get_tile_masks(region, &mask_x, &mask_y);
570
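   /* The bits of the draw offset that fall within a tile are returned via
    * tile_x/tile_y; the tile-aligned remainder is folded into the byte
    * offset computed below.
    */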
571 *tile_x = irb->draw_x & mask_x;
572 *tile_y = irb->draw_y & mask_y;
573 return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
574 irb->draw_y & ~mask_y);
575 }
576
577 /**
578 * Called by glFramebufferTexture[123]DEXT() (and other places) to
579 * prepare for rendering into texture memory. This might be called
580 * many times to choose different texture levels, cube faces, etc
581 * before intel_finish_render_texture() is ever called.
582 */
583 static void
584 intel_render_texture(struct gl_context * ctx,
585 struct gl_framebuffer *fb,
586 struct gl_renderbuffer_attachment *att)
587 {
588 struct intel_context *intel = intel_context(ctx);
589 struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
590 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
591 struct intel_texture_image *intel_image = intel_texture_image(image);
592 struct intel_mipmap_tree *mt = intel_image->mt;
593 int layer;
594
595 (void) fb;
596
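   /* Cube faces and 3D/array slices both end up as miptree layers, so fold
    * whichever one is set into a single layer index.
    */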
597 if (att->CubeMapFace > 0) {
598 assert(att->Zoffset == 0);
599 layer = att->CubeMapFace;
600 } else {
601 layer = att->Zoffset;
602 }
603
604 if (!intel_image->mt) {
605 /* Fallback on drawing to a texture that doesn't have a miptree
606 * (has a border, width/height 0, etc.)
607 */
608 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
609 _swrast_render_texture(ctx, fb, att);
610 return;
611 }
612 else if (!irb) {
613 intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
614
615 irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);
616
617 if (irb) {
618 /* bind the wrapper to the attachment point */
619 _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
620 }
621 else {
622 /* fallback to software rendering */
623 _swrast_render_texture(ctx, fb, att);
624 return;
625 }
626 }
627
628 if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
629 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
630 _swrast_render_texture(ctx, fb, att);
631 return;
632 }
633
634 irb->tex_image = image;
635
636 DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
637 _mesa_get_format_name(image->TexFormat),
638 att->Texture->Name, image->Width, image->Height,
639 irb->Base.Base.RefCount);
640
641 /* update drawing region, etc */
642 intel_draw_buffer(ctx);
643 }
644
645
646 /**
647 * Called by Mesa when rendering to a texture is done.
648 */
649 static void
650 intel_finish_render_texture(struct gl_context * ctx,
651 struct gl_renderbuffer_attachment *att)
652 {
653 struct intel_context *intel = intel_context(ctx);
654 struct gl_texture_object *tex_obj = att->Texture;
655 struct gl_texture_image *image =
656 tex_obj->Image[att->CubeMapFace][att->TextureLevel];
657 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
658
659 DBG("Finish render %s texture tex=%u\n",
660 _mesa_get_format_name(image->TexFormat), att->Texture->Name);
661
662 if (irb)
663 irb->tex_image = NULL;
664
665 /* Since we've (probably) rendered to the texture and will (likely) use
666 * it in the texture domain later on in this batchbuffer, flush the
667 * batch. Once again, we wish for a domain tracker in libdrm to cover
668 * usage inside of a batchbuffer like GEM does in the kernel.
669 */
670 intel_batchbuffer_emit_mi_flush(intel);
671 }
672
673 /**
674 * Do additional "completeness" testing of a framebuffer object.
675 */
676 static void
677 intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
678 {
679 struct intel_context *intel = intel_context(ctx);
680 const struct intel_renderbuffer *depthRb =
681 intel_get_renderbuffer(fb, BUFFER_DEPTH);
682 const struct intel_renderbuffer *stencilRb =
683 intel_get_renderbuffer(fb, BUFFER_STENCIL);
684 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
685 int i;
686
687 DBG("%s() on fb %p (%s)\n", __FUNCTION__,
688 fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
689 (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
690
691 if (depthRb)
692 depth_mt = depthRb->mt;
693 if (stencilRb) {
694 stencil_mt = stencilRb->mt;
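      /* If packed depth/stencil is being faked with a separate stencil
       * miptree, the stencil bits actually live in the child stencil_mt, so
       * validate against that.
       */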
695 if (stencil_mt->stencil_mt)
696 stencil_mt = stencil_mt->stencil_mt;
697 }
698
699 if (depth_mt && stencil_mt) {
700 if (depth_mt == stencil_mt) {
701 /* For true packed depth/stencil (not faked on prefers-separate-stencil
702 * hardware) we need to be sure they're the same level/layer, since
703 * we'll be emitting a single packet describing the packed setup.
704 */
705 if (depthRb->mt_level != stencilRb->mt_level ||
706 depthRb->mt_layer != stencilRb->mt_layer) {
707 DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
708 depthRb->mt_level,
709 depthRb->mt_layer,
710 stencilRb->mt_level,
711 stencilRb->mt_layer);
712 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
713 }
714 } else {
715 if (!intel->has_separate_stencil) {
716 DBG("separate stencil unsupported\n");
717 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
718 }
719 if (stencil_mt->format != MESA_FORMAT_S8) {
720 DBG("separate stencil is %s instead of S8\n",
721 _mesa_get_format_name(stencil_mt->format));
722 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
723 }
724 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
725 /* Before Gen7, separate depth and stencil buffers can be used
726 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
727 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
728 * [DevSNB]: This field must be set to the same value (enabled
729 * or disabled) as Hierarchical Depth Buffer Enable.
730 */
731 DBG("separate stencil without HiZ\n");
732          fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
733 }
734 }
735 }
736
737 for (i = 0; i < Elements(fb->Attachment); i++) {
738 struct gl_renderbuffer *rb;
739 struct intel_renderbuffer *irb;
740
741 if (fb->Attachment[i].Type == GL_NONE)
742 continue;
743
744 /* A supported attachment will have a Renderbuffer set either
745 * from being a Renderbuffer or being a texture that got the
746 * intel_wrap_texture() treatment.
747 */
748 rb = fb->Attachment[i].Renderbuffer;
749 if (rb == NULL) {
750 DBG("attachment without renderbuffer\n");
751 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
752 continue;
753 }
754
755 if (fb->Attachment[i].Type == GL_TEXTURE) {
756 const struct gl_texture_image *img =
757 _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
758
759 if (img->Border) {
760 DBG("texture with border\n");
761 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
762 continue;
763 }
764 }
765
766 irb = intel_renderbuffer(rb);
767 if (irb == NULL) {
768 DBG("software rendering renderbuffer\n");
769 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
770 continue;
771 }
772
773 if (!intel->vtbl.render_target_supported(intel, rb)) {
774 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
775 _mesa_get_format_name(intel_rb_format(irb)));
776 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
777 }
778 }
779 }
780
781 /**
782 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
783 * We can do this when the dst renderbuffer is actually a texture and
784 * there is no scaling, mirroring or scissoring.
785 *
786 * \return new buffer mask indicating the buffers left to blit using the
787 * normal path.
788 */
789 static GLbitfield
790 intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
791 GLint srcX0, GLint srcY0,
792 GLint srcX1, GLint srcY1,
793 GLint dstX0, GLint dstY0,
794 GLint dstX1, GLint dstY1,
795 GLbitfield mask, GLenum filter)
796 {
797 if (mask & GL_COLOR_BUFFER_BIT) {
798 const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
799 const struct gl_framebuffer *readFb = ctx->ReadBuffer;
800 const struct gl_renderbuffer_attachment *drawAtt =
801 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
802 struct intel_renderbuffer *srcRb =
803 intel_renderbuffer(readFb->_ColorReadBuffer);
804
805       /* If the source and destination are the same size, there is no
806        * mirroring, the rectangles lie within the bounds of the read and
807        * draw framebuffers, and there is no scissor, then we can use
808        * glCopyTexSubImage2D to implement the blit.  This ends up as a
809        * fast hardware blit on some drivers. */
810 if (srcRb && drawAtt && drawAtt->Texture &&
811 srcX0 - srcX1 == dstX0 - dstX1 &&
812 srcY0 - srcY1 == dstY0 - dstY1 &&
813 srcX1 >= srcX0 &&
814 srcY1 >= srcY0 &&
815 srcX0 >= 0 && srcX1 <= readFb->Width &&
816 srcY0 >= 0 && srcY1 <= readFb->Height &&
817 dstX0 >= 0 && dstX1 <= drawFb->Width &&
818 dstY0 >= 0 && dstY1 <= drawFb->Height &&
819 !ctx->Scissor.Enabled) {
820 const struct gl_texture_object *texObj = drawAtt->Texture;
821 const GLuint dstLevel = drawAtt->TextureLevel;
822 const GLenum target = texObj->Target;
823
824 struct gl_texture_image *texImage =
825 _mesa_select_tex_image(ctx, texObj, target, dstLevel);
826
827 if (intel_copy_texsubimage(intel_context(ctx),
828 intel_texture_image(texImage),
829 dstX0, dstY0,
830 srcRb,
831 srcX0, srcY0,
832 srcX1 - srcX0, /* width */
833 srcY1 - srcY0))
834 mask &= ~GL_COLOR_BUFFER_BIT;
835 }
836 }
837
838 return mask;
839 }
840
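/**
 * Implements glBlitFramebuffer: try the glCopyTexSubImage2D fast path first,
 * then (on non-i915 hardware) hand the remaining buffers to BLORP, and
 * finally fall back to the meta implementation for whatever is left in the
 * mask.
 */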
841 static void
842 intel_blit_framebuffer(struct gl_context *ctx,
843 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
844 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
845 GLbitfield mask, GLenum filter)
846 {
847 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
848 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
849 srcX0, srcY0, srcX1, srcY1,
850 dstX0, dstY0, dstX1, dstY1,
851 mask, filter);
852 if (mask == 0x0)
853 return;
854
855 #ifndef I915
856 mask = brw_blorp_framebuffer(intel_context(ctx),
857 srcX0, srcY0, srcX1, srcY1,
858 dstX0, dstY0, dstX1, dstY1,
859 mask, filter);
860 if (mask == 0x0)
861 return;
862 #endif
863
864 _mesa_meta_BlitFramebuffer(ctx,
865 srcX0, srcY0, srcX1, srcY1,
866 dstX0, dstY0, dstX1, dstY1,
867 mask, filter);
868 }
869
870 void
871 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
872 {
873 if (irb->mt) {
874 intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
875 irb->mt_level,
876 irb->mt_layer);
877 }
878 }
879
880 void
881 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
882 {
883 if (irb->mt) {
884 intel_miptree_slice_set_needs_depth_resolve(irb->mt,
885 irb->mt_level,
886 irb->mt_layer);
887 }
888 }
889
890 bool
891 intel_renderbuffer_resolve_hiz(struct intel_context *intel,
892 struct intel_renderbuffer *irb)
893 {
894 if (irb->mt)
895 return intel_miptree_slice_resolve_hiz(intel,
896 irb->mt,
897 irb->mt_level,
898 irb->mt_layer);
899
900 return false;
901 }
902
903 bool
904 intel_renderbuffer_resolve_depth(struct intel_context *intel,
905 struct intel_renderbuffer *irb)
906 {
907 if (irb->mt)
908 return intel_miptree_slice_resolve_depth(intel,
909 irb->mt,
910 irb->mt_level,
911 irb->mt_layer);
912
913 return false;
914 }
915
916 /**
917 * Do one-time context initializations related to GL_EXT_framebuffer_object.
918 * Hook in device driver functions.
919 */
920 void
921 intel_fbo_init(struct intel_context *intel)
922 {
923 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
924 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
925 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
926 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
927 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
928 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
929 intel->ctx.Driver.RenderTexture = intel_render_texture;
930 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
931 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
932 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
933 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
934
935 #if FEATURE_OES_EGL_image
936 intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
937 intel_image_target_renderbuffer_storage;
938 #endif
939 }