mesa: Remove unnecessary parameters CompressedTexImage
[mesa.git] / src / mesa / drivers / dri / intel / intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO

static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

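/**
 * Return the intel_region backing the given renderbuffer attachment, or
 * NULL if there is none.  For BUFFER_STENCIL this prefers the separate
 * stencil miptree's region when one exists.
 */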
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt) {
      if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
         return irb->mt->stencil_mt->region;
      else
         return irb->mt->region;
   } else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   free(irb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* A window-system renderbuffer is stored upside down relative to the
    * mapping we return, so ask the miptree for a vertically flipped
    * rectangle and then hand back a pointer to its bottom row with a
    * negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}


/**
 * Round up the requested multisample count to the next supported sample size.
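 *
 * For example, as implemented below: on Gen6 any nonzero request yields 4;
 * on Gen7 a request of 1-4 yields 4 and anything larger yields 8; platforms
 * without MSAA report 1 for any nonzero request.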
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   switch (intel->gen) {
   case 6:
      /* Gen6 supports only 4x multisampling. */
      if (num_samples > 0)
         return 4;
      else
         return 0;
   case 7:
      /* Gen7 supports 4x and 8x multisampling. */
      if (num_samples > 4)
         return 8;
      else if (num_samples > 0)
         return 4;
      else
         return 0;
   default:
      /* MSAA unsupported.  However, a careful reading of
       * EXT_framebuffer_multisample reveals that we need to permit
       * num_samples to be 1 (since num_samples is permitted to be as high as
       * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1).  Since
       * platforms before Gen6 don't support MSAA, this is safe, because
       * multisampling won't happen anyhow.
       */
      if (num_samples > 0)
         return 1;
      return 0;
   }
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_screen *screen = intel->intelScreen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height,
                                                   rb->NumSamples);
   if (!irb->mt)
      return false;

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
302 "glEGLImageTargetRenderbufferStorage(unsupported image format");
303 return;
304 break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

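/**
 * Called when the window system tells us the drawable changed size; update
 * Mesa's framebuffer state and reallocate storage for any window-system
 * renderbuffer whose size no longer matches.
 */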
static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (_mesa_is_user_fbo(fb)) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->NumSamples = num_samples;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_renderbuffer()) are most similar in their
 * handling to user-created renderbuffers, but they have a resize handler that
 * may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

/**
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format.  In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->Format = image->TexFormat;
   rb->InternalFormat = image->InternalFormat;
   rb->_BaseFormat = image->_BaseFormat;
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

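   /* If this depth format supports HiZ and the miptree doesn't have a HiZ
    * buffer yet, try to allocate one now; if that fails, report the wrap as
    * unsuccessful so the caller can fall back to software rendering.
    */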
   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y);

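   /* Split the draw offset into an intra-tile x/y offset (returned through
    * tile_x/tile_y) and a tile-aligned part that is folded into the byte
    * offset returned below.
    */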
   *tile_x = irb->draw_x & mask_x;
   *tile_y = irb->draw_y & mask_y;
   return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
                                          irb->draw_y & ~mask_y);
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

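   /* Fold the attachment's cube face or 3D/array slice into a single layer
    * index for the miptree; the two are mutually exclusive here.
    */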
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

      irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   irb->tex_image = image;

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.Base.RefCount);

   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   if (irb)
      irb->tex_image = NULL;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   DBG("%s() on fb %p (%s)\n", __FUNCTION__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
                depthRb->mt_level,
                depthRb->mt_layer,
                stencilRb->mt_level,
                stencilRb->mt_layer);
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil) {
            DBG("separate stencil unsupported\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
         if (stencil_mt->format != MESA_FORMAT_S8) {
            DBG("separate stencil is %s instead of S8\n",
                _mesa_get_format_name(stencil_mt->format));
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
         if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            DBG("separate stencil without HiZ\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
         }
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         const struct gl_texture_image *img =
            _mesa_get_attachment_teximage_const(&fb->Attachment[i]);

         if (img->Border) {
            DBG("texture with border\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, rb)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(intel_rb_format(irb)));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
      struct intel_renderbuffer *srcRb =
         intel_renderbuffer(readFb->_ColorReadBuffer);

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the bounds of the texture, and there is no
       * scissor, then we can use glCopyTexSubImage2D to implement the blit.
       * This will end up as a fast hardware blit on some drivers.
       */
      if (srcRb && drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcRb,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

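/**
 * Driver hook for glBlitFramebuffer(): try the CopyTexSubImage path first,
 * then (when built for the non-i915 driver) brw_blorp_framebuffer, and
 * finally the meta fallback for whatever bits remain in the mask.
 */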
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

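   /* Anything the BLT path could not handle is left in mask; in the non-i915
    * build, give blorp a chance at it before falling back to the meta
    * implementation below.
    */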
#ifndef I915
   mask = brw_blorp_framebuffer(intel_context(ctx),
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;
#endif

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * This is a no-op except on multisample buffers shared with DRI2.
 */
void
intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
{
   if (irb->mt && irb->mt->singlesample_mt)
      irb->mt->need_downsample = true;
}

void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   }
}

void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   }
}

bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}