10a8c7fac392b3517938f2864071ca55851f1474
[mesa.git] / src / mesa / drivers / dri / intel / intel_fbo.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/enums.h"
30 #include "main/imports.h"
31 #include "main/macros.h"
32 #include "main/mfeatures.h"
33 #include "main/mtypes.h"
34 #include "main/fbobject.h"
35 #include "main/framebuffer.h"
36 #include "main/renderbuffer.h"
37 #include "main/context.h"
38 #include "main/teximage.h"
39 #include "main/image.h"
40
41 #include "swrast/swrast.h"
42 #include "drivers/common/meta.h"
43
44 #include "intel_context.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_buffers.h"
47 #include "intel_blit.h"
48 #include "intel_fbo.h"
49 #include "intel_mipmap_tree.h"
50 #include "intel_regions.h"
51 #include "intel_tex.h"
52 #include "intel_span.h"
53 #ifndef I915
54 #include "brw_context.h"
55 #endif
56
57 #define FILE_DEBUG_FLAG DEBUG_FBO
58
59 static struct gl_renderbuffer *
60 intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
61
62 struct intel_region*
63 intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
64 {
65 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
66 if (irb && irb->mt) {
67 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
68 return irb->mt->stencil_mt->region;
69 else
70 return irb->mt->region;
71 } else
72 return NULL;
73 }
74
75 /**
76 * Create a new framebuffer object.
77 */
78 static struct gl_framebuffer *
79 intel_new_framebuffer(struct gl_context * ctx, GLuint name)
80 {
81 /* Only drawable state in intel_framebuffer at this time, just use Mesa's
82 * class
83 */
84 return _mesa_new_framebuffer(ctx, name);
85 }
86
87
88 /** Called by gl_renderbuffer::Delete() */
89 static void
90 intel_delete_renderbuffer(struct gl_renderbuffer *rb)
91 {
92 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
93
94 ASSERT(irb);
95
96 intel_miptree_release(&irb->mt);
97
98 _mesa_delete_renderbuffer(rb);
99 }
100
/**
 * \see dd_function_table::MapRenderbuffer
 *
 * Maps the rectangle (x, y, w, h) of \p rb for CPU access, returning the
 * map pointer in \p out_map and its byte stride in \p out_stride.  Three
 * cases are handled: a malloc'd swrast buffer (accum), a renderbuffer with
 * no miptree yet, and a miptree-backed buffer; window-system buffers
 * (rb->Name == 0) are returned flipped via a negative stride.
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      /* Direct pointer arithmetic into the malloc'd buffer; no GPU map. */
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle on flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
		     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      /* Point at the last row of the mapped rectangle and negate the
       * stride so that successive rows walk upward in memory.
       */
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
157
158 /**
159 * \see dd_function_table::UnmapRenderbuffer
160 */
161 static void
162 intel_unmap_renderbuffer(struct gl_context *ctx,
163 struct gl_renderbuffer *rb)
164 {
165 struct intel_context *intel = intel_context(ctx);
166 struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
167 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
168
169 DBG("%s: rb %d (%s)\n", __FUNCTION__,
170 rb->Name, _mesa_get_format_name(rb->Format));
171
172 if (srb->Buffer) {
173 /* this is a malloc'd renderbuffer (accum buffer) */
174 /* nothing to do */
175 return;
176 }
177
178 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
179 }
180
181
182 /**
183 * Round up the requested multisample count to the next supported sample size.
184 */
185 unsigned
186 intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
187 {
188 switch (intel->gen) {
189 case 6:
190 /* Gen6 supports only 4x multisampling. */
191 if (num_samples > 0)
192 return 4;
193 else
194 return 0;
195 case 7:
196 /* Gen7 supports 4x and 8x multisampling. */
197 if (num_samples > 4)
198 return 8;
199 else if (num_samples > 0)
200 return 4;
201 else
202 return 0;
203 return 0;
204 default:
205 /* MSAA unsupported. However, a careful reading of
206 * EXT_framebuffer_multisample reveals that we need to permit
207 * num_samples to be 1 (since num_samples is permitted to be as high as
208 * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1). Since
209 * platforms before Gen6 don't support MSAA, this is safe, because
210 * multisampling won't happen anyhow.
211 */
212 if (num_samples > 0)
213 return 1;
214 return 0;
215 }
216 }
217
218
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 *
 * Chooses a hardware format (reusing the texture format-choice logic for
 * everything except pure stencil formats), records the new dimensions, and
 * replaces any existing miptree with a freshly allocated one.
 *
 * \return true on success (including the zero-sized no-storage case),
 *         false if miptree allocation fails.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_screen *screen = intel->intelScreen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   /* Clamp the requested sample count to what the hardware supports. */
   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
							 internalFormat,
							 GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
	 rb->Format = MESA_FORMAT_S8;
      } else {
	 assert(!intel->must_use_separate_stencil);
	 rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   /* Release any previous storage before (re)allocating. */
   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   /* Zero-sized storage is legal and simply leaves irb->mt NULL. */
   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
						   width, height,
						   rb->NumSamples);
   if (!irb->mt)
      return false;

   return true;
}
280
281
282 static void
283 intel_image_target_renderbuffer_storage(struct gl_context *ctx,
284 struct gl_renderbuffer *rb,
285 void *image_handle)
286 {
287 struct intel_context *intel = intel_context(ctx);
288 struct intel_renderbuffer *irb;
289 __DRIscreen *screen;
290 __DRIimage *image;
291
292 screen = intel->intelScreen->driScrnPriv;
293 image = screen->dri2.image->lookupEGLImage(screen, image_handle,
294 screen->loaderPrivate);
295 if (image == NULL)
296 return;
297
298 /* __DRIimage is opaque to the core so it has to be checked here */
299 switch (image->format) {
300 case MESA_FORMAT_RGBA8888_REV:
301 _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
302 "glEGLImageTargetRenderbufferStorage(unsupported image format");
303 return;
304 break;
305 default:
306 break;
307 }
308
309 irb = intel_renderbuffer(rb);
310 intel_miptree_release(&irb->mt);
311 irb->mt = intel_miptree_create_for_region(intel,
312 GL_TEXTURE_2D,
313 image->format,
314 image->region);
315 if (!irb->mt)
316 return;
317
318 rb->InternalFormat = image->internal_format;
319 rb->Width = image->region->width;
320 rb->Height = image->region->height;
321 rb->Format = image->format;
322 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
323 image->internal_format);
324 }
325
326 /**
327 * Called for each hardware renderbuffer when a _window_ is resized.
328 * Just update fields.
329 * Not used for user-created renderbuffers!
330 */
331 static GLboolean
332 intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
333 GLenum internalFormat, GLuint width, GLuint height)
334 {
335 ASSERT(rb->Name == 0);
336 rb->Width = width;
337 rb->Height = height;
338 rb->InternalFormat = internalFormat;
339
340 return true;
341 }
342
343
344 static void
345 intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
346 GLuint width, GLuint height)
347 {
348 int i;
349
350 _mesa_resize_framebuffer(ctx, fb, width, height);
351
352 fb->Initialized = true; /* XXX remove someday */
353
354 if (_mesa_is_user_fbo(fb)) {
355 return;
356 }
357
358
359 /* Make sure all window system renderbuffers are up to date */
360 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
361 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
362
363 /* only resize if size is changing */
364 if (rb && (rb->Width != width || rb->Height != height)) {
365 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
366 }
367 }
368 }
369
370
371 /** Dummy function for gl_renderbuffer::AllocStorage() */
372 static GLboolean
373 intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
374 GLenum internalFormat, GLuint width, GLuint height)
375 {
376 _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
377 return false;
378 }
379
380 /**
381 * Create a new intel_renderbuffer which corresponds to an on-screen window,
382 * not a user-created renderbuffer.
383 *
384 * \param num_samples must be quantized.
385 */
386 struct intel_renderbuffer *
387 intel_create_renderbuffer(gl_format format, unsigned num_samples)
388 {
389 struct intel_renderbuffer *irb;
390 struct gl_renderbuffer *rb;
391
392 GET_CURRENT_CONTEXT(ctx);
393
394 irb = CALLOC_STRUCT(intel_renderbuffer);
395 if (!irb) {
396 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
397 return NULL;
398 }
399
400 rb = &irb->Base.Base;
401
402 _mesa_init_renderbuffer(rb, 0);
403 rb->ClassID = INTEL_RB_CLASS;
404 rb->_BaseFormat = _mesa_get_format_base_format(format);
405 rb->Format = format;
406 rb->InternalFormat = rb->_BaseFormat;
407 rb->NumSamples = num_samples;
408
409 /* intel-specific methods */
410 rb->Delete = intel_delete_renderbuffer;
411 rb->AllocStorage = intel_alloc_window_storage;
412
413 return irb;
414 }
415
416 /**
417 * Private window-system buffers (as opposed to ones shared with the display
418 * server created with intel_create_renderbuffer()) are most similar in their
419 * handling to user-created renderbuffers, but they have a resize handler that
420 * may be called at intel_update_renderbuffers() time.
421 *
422 * \param num_samples must be quantized.
423 */
424 struct intel_renderbuffer *
425 intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
426 {
427 struct intel_renderbuffer *irb;
428
429 irb = intel_create_renderbuffer(format, num_samples);
430 irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
431
432 return irb;
433 }
434
435 /**
436 * Create a new renderbuffer object.
437 * Typically called via glBindRenderbufferEXT().
438 */
439 static struct gl_renderbuffer *
440 intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
441 {
442 /*struct intel_context *intel = intel_context(ctx); */
443 struct intel_renderbuffer *irb;
444 struct gl_renderbuffer *rb;
445
446 irb = CALLOC_STRUCT(intel_renderbuffer);
447 if (!irb) {
448 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
449 return NULL;
450 }
451
452 rb = &irb->Base.Base;
453
454 _mesa_init_renderbuffer(rb, name);
455 rb->ClassID = INTEL_RB_CLASS;
456
457 /* intel-specific methods */
458 rb->Delete = intel_delete_renderbuffer;
459 rb->AllocStorage = intel_alloc_renderbuffer_storage;
460 /* span routines set in alloc_storage function */
461
462 return rb;
463 }
464
465
466 /**
467 * Called via glBindFramebufferEXT().
468 */
469 static void
470 intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
471 struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
472 {
473 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
474 intel_draw_buffer(ctx);
475 }
476 else {
477 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
478 }
479 }
480
481
482 /**
483 * Called via glFramebufferRenderbufferEXT().
484 */
485 static void
486 intel_framebuffer_renderbuffer(struct gl_context * ctx,
487 struct gl_framebuffer *fb,
488 GLenum attachment, struct gl_renderbuffer *rb)
489 {
490 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
491
492 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
493 intel_draw_buffer(ctx);
494 }
495
/**
 * Set up an intel_renderbuffer to wrap one slice (level/layer) of a texture
 * image so the slice can be used as a render target.
 *
 * NOTE(review): an earlier version of this comment described recursion via
 * \c irb->wrapped_depth / \c irb->wrapped_stencil, which no longer exists
 * in this code path.
 *
 * @return true on success
 */
509
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   /* Mirror the texture image's format and per-level size onto the rb. */
   rb->Format = image->TexFormat;
   rb->InternalFormat = image->InternalFormat;
   rb->_BaseFormat = image->_BaseFormat;
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   rb->Delete = intel_delete_renderbuffer;
   /* Storage belongs to the texture; AllocStorage must never run. */
   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   /* Share the texture's miptree (drops any previous reference). */
   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   /* Lazily allocate a HiZ buffer the first time a HiZ-capable depth
    * format is rendered to; failure here makes the wrapper unusable.
    */
   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
      if (!mt->hiz_mt)
	 return false;
   }

   return true;
}
547
548 void
549 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
550 {
551 unsigned int dst_x, dst_y;
552
553 /* compute offset of the particular 2D image within the texture region */
554 intel_miptree_get_image_offset(irb->mt,
555 irb->mt_level,
556 0, /* face, which we ignore */
557 irb->mt_layer,
558 &dst_x, &dst_y);
559
560 irb->draw_x = dst_x;
561 irb->draw_y = dst_y;
562 }
563
564 /**
565 * Rendering to tiled buffers requires that the base address of the
566 * buffer be aligned to a page boundary. We generally render to
567 * textures by pointing the surface at the mipmap image level, which
568 * may not be aligned to a tile boundary.
569 *
570 * This function returns an appropriately-aligned base offset
571 * according to the tiling restrictions, plus any required x/y offset
572 * from there.
573 */
574 uint32_t
575 intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
576 uint32_t *tile_x,
577 uint32_t *tile_y)
578 {
579 struct intel_region *region = irb->mt->region;
580 uint32_t mask_x, mask_y;
581
582 intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
583
584 *tile_x = irb->draw_x & mask_x;
585 *tile_y = irb->draw_y & mask_y;
586 return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
587 irb->draw_y & ~mask_y, false);
588 }
589
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   /* Cube faces and array/3D slices are both addressed as "layer" here;
    * the two are mutually exclusive on an attachment.
    */
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

      /* No wrapper renderbuffer yet: create one (name ~0 marks it as a
       * driver-internal wrapper) and attach it.
       */
      irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   /* Point the wrapper at the requested level/layer; on failure fall back
    * to swrast just like the no-miptree case above.
    */
   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   irb->tex_image = image;

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.Base.RefCount);

   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}
657
658
659 /**
660 * Called by Mesa when rendering to a texture is done.
661 */
662 static void
663 intel_finish_render_texture(struct gl_context * ctx,
664 struct gl_renderbuffer_attachment *att)
665 {
666 struct intel_context *intel = intel_context(ctx);
667 struct gl_texture_object *tex_obj = att->Texture;
668 struct gl_texture_image *image =
669 tex_obj->Image[att->CubeMapFace][att->TextureLevel];
670 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
671
672 DBG("Finish render %s texture tex=%u\n",
673 _mesa_get_format_name(image->TexFormat), att->Texture->Name);
674
675 if (irb)
676 irb->tex_image = NULL;
677
678 /* Since we've (probably) rendered to the texture and will (likely) use
679 * it in the texture domain later on in this batchbuffer, flush the
680 * batch. Once again, we wish for a domain tracker in libdrm to cover
681 * usage inside of a batchbuffer like GEM does in the kernel.
682 */
683 intel_batchbuffer_emit_mi_flush(intel);
684 }
685
686 /**
687 * Do additional "completeness" testing of a framebuffer object.
688 */
689 static void
690 intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
691 {
692 struct intel_context *intel = intel_context(ctx);
693 const struct intel_renderbuffer *depthRb =
694 intel_get_renderbuffer(fb, BUFFER_DEPTH);
695 const struct intel_renderbuffer *stencilRb =
696 intel_get_renderbuffer(fb, BUFFER_STENCIL);
697 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
698 int i;
699
700 DBG("%s() on fb %p (%s)\n", __FUNCTION__,
701 fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
702 (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
703
704 if (depthRb)
705 depth_mt = depthRb->mt;
706 if (stencilRb) {
707 stencil_mt = stencilRb->mt;
708 if (stencil_mt->stencil_mt)
709 stencil_mt = stencil_mt->stencil_mt;
710 }
711
712 if (depth_mt && stencil_mt) {
713 if (depth_mt == stencil_mt) {
714 /* For true packed depth/stencil (not faked on prefers-separate-stencil
715 * hardware) we need to be sure they're the same level/layer, since
716 * we'll be emitting a single packet describing the packed setup.
717 */
718 if (depthRb->mt_level != stencilRb->mt_level ||
719 depthRb->mt_layer != stencilRb->mt_layer) {
720 DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
721 depthRb->mt_level,
722 depthRb->mt_layer,
723 stencilRb->mt_level,
724 stencilRb->mt_layer);
725 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
726 }
727 } else {
728 if (!intel->has_separate_stencil) {
729 DBG("separate stencil unsupported\n");
730 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
731 }
732 if (stencil_mt->format != MESA_FORMAT_S8) {
733 DBG("separate stencil is %s instead of S8\n",
734 _mesa_get_format_name(stencil_mt->format));
735 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
736 }
737 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
738 /* Before Gen7, separate depth and stencil buffers can be used
739 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
740 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
741 * [DevSNB]: This field must be set to the same value (enabled
742 * or disabled) as Hierarchical Depth Buffer Enable.
743 */
744 DBG("separate stencil without HiZ\n");
745 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
746 }
747 }
748 }
749
750 for (i = 0; i < Elements(fb->Attachment); i++) {
751 struct gl_renderbuffer *rb;
752 struct intel_renderbuffer *irb;
753
754 if (fb->Attachment[i].Type == GL_NONE)
755 continue;
756
757 /* A supported attachment will have a Renderbuffer set either
758 * from being a Renderbuffer or being a texture that got the
759 * intel_wrap_texture() treatment.
760 */
761 rb = fb->Attachment[i].Renderbuffer;
762 if (rb == NULL) {
763 DBG("attachment without renderbuffer\n");
764 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
765 continue;
766 }
767
768 if (fb->Attachment[i].Type == GL_TEXTURE) {
769 const struct gl_texture_image *img =
770 _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
771
772 if (img->Border) {
773 DBG("texture with border\n");
774 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
775 continue;
776 }
777 }
778
779 irb = intel_renderbuffer(rb);
780 if (irb == NULL) {
781 DBG("software rendering renderbuffer\n");
782 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
783 continue;
784 }
785
786 if (!intel->vtbl.render_target_supported(intel, rb)) {
787 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
788 _mesa_get_format_name(intel_rb_format(irb)));
789 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
790 }
791 }
792 }
793
/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      /* Only the first color draw buffer is considered for this fast path. */
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
      struct intel_renderbuffer *srcRb =
         intel_renderbuffer(readFb->_ColorReadBuffer);

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers */
      if (srcRb && drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&   /* equal width, no X mirror */
          srcY0 - srcY1 == dstY0 - dstY1 &&   /* equal height, no Y mirror */
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         /* On success, the color bit is cleared from the remaining mask. */
         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcRb,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}
853
854 static void
855 intel_blit_framebuffer(struct gl_context *ctx,
856 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
857 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
858 GLbitfield mask, GLenum filter)
859 {
860 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
861 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
862 srcX0, srcY0, srcX1, srcY1,
863 dstX0, dstY0, dstX1, dstY1,
864 mask, filter);
865 if (mask == 0x0)
866 return;
867
868 #ifndef I915
869 mask = brw_blorp_framebuffer(intel_context(ctx),
870 srcX0, srcY0, srcX1, srcY1,
871 dstX0, dstY0, dstX1, dstY1,
872 mask, filter);
873 if (mask == 0x0)
874 return;
875 #endif
876
877 _mesa_meta_BlitFramebuffer(ctx,
878 srcX0, srcY0, srcX1, srcY1,
879 dstX0, dstY0, dstX1, dstY1,
880 mask, filter);
881 }
882
883 /**
884 * This is a no-op except on multisample buffers shared with DRI2.
885 */
886 void
887 intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
888 {
889 if (irb->mt && irb->mt->singlesample_mt)
890 irb->mt->need_downsample = true;
891 }
892
893 void
894 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
895 {
896 if (irb->mt) {
897 intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
898 irb->mt_level,
899 irb->mt_layer);
900 }
901 }
902
903 void
904 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
905 {
906 if (irb->mt) {
907 intel_miptree_slice_set_needs_depth_resolve(irb->mt,
908 irb->mt_level,
909 irb->mt_layer);
910 }
911 }
912
913 bool
914 intel_renderbuffer_resolve_hiz(struct intel_context *intel,
915 struct intel_renderbuffer *irb)
916 {
917 if (irb->mt)
918 return intel_miptree_slice_resolve_hiz(intel,
919 irb->mt,
920 irb->mt_level,
921 irb->mt_layer);
922
923 return false;
924 }
925
926 bool
927 intel_renderbuffer_resolve_depth(struct intel_context *intel,
928 struct intel_renderbuffer *irb)
929 {
930 if (irb->mt)
931 return intel_miptree_slice_resolve_depth(intel,
932 irb->mt,
933 irb->mt_level,
934 irb->mt_layer);
935
936 return false;
937 }
938
/**
 * Move the texture image backing this renderbuffer into a freshly
 * allocated miptree, preserving the current sample count and MSAA layout.
 */
void
intel_renderbuffer_move_to_temp(struct intel_context *intel,
                                struct intel_renderbuffer *irb)
{
   struct intel_texture_image *intel_image =
      intel_texture_image(irb->tex_image);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_miptree_get_dimensions_for_image(irb->tex_image, &width, &height, &depth);

   /* Allocate a tree covering only this image's level. */
   new_mt = intel_miptree_create(intel, irb->tex_image->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 true,
                                 irb->mt->num_samples,
                                 irb->mt->msaa_layout);

   intel_miptree_copy_teximage(intel, intel_image, new_mt);
   /* NOTE(review): this references intel_image->mt rather than new_mt —
    * presumably intel_miptree_copy_teximage repoints intel_image->mt at
    * the destination tree; confirm against intel_mipmap_tree.c.
    */
   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   /* Drop the local creation reference; the image now holds its own. */
   intel_miptree_release(&new_mt);
}
964
965 /**
966 * Do one-time context initializations related to GL_EXT_framebuffer_object.
967 * Hook in device driver functions.
968 */
969 void
970 intel_fbo_init(struct intel_context *intel)
971 {
972 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
973 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
974 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
975 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
976 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
977 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
978 intel->ctx.Driver.RenderTexture = intel_render_texture;
979 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
980 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
981 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
982 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
983 intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
984 intel_image_target_renderbuffer_storage;
985 }