i965: Remove redundant check in blitter-based glBlitFramebuffer().
[mesa.git] / src / mesa / drivers / dri / i965 / intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"

#define FILE_DEBUG_FLAG DEBUG_FBO

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* There is only drawable state in intel_framebuffer at this time, so just
    * use Mesa's class.
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down. So we ask for a rectangle that is flipped vertically, and
    * then return a pointer to the bottom of it with a negative stride.
    */
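   /* For example, with an 8-row winsys buffer, a request for y = 0, h = 2
    * (the bottom two rows in GL's bottom-up orientation) maps memory rows
    * 6..7 and returns a pointer to row 7 with the stride negated, so
    * advancing through the map walks upward in memory.
    */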
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer); nothing to do */
      return;
   }

   intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
}


/**
 * Round up the requested multisample count to the next supported sample size.
 */
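/* The mode list from intel_supported_msaa_modes() is sorted in descending
 * order and terminated by -1, which the loop below relies on: a request
 * larger than every supported count falls through to 0 (non-MSAA). For
 * example, given modes {8, 4, 0}, num_samples = 3 quantizes to 4 and
 * num_samples = 16 quantizes to 0.
 */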
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->intelScreen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                                   internalFormat,
                                                   GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S_UINT8;
      } else {
         assert(!brw->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_Z24_UNORM_X8_UINT;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   rb->NumSamples);
   if (!irb->mt)
      return false;

   return true;
}


static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = brw->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are not "
                  "supported as render targets)");
      return;
   }

   /* Buffers imported from outside are read-only. */
   if (image->dma_buf_imported) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(dma buffers are read-only)");
      return;
   }

   /* __DRIimage is opaque to the core, so its format has to be checked here. */
   switch (image->format) {
   case MESA_FORMAT_R8G8B8A8_UNORM:
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_bo(brw,
                                         image->region->bo,
                                         image->format,
                                         image->offset,
                                         image->region->width,
                                         image->region->height,
                                         image->region->pitch,
                                         image->region->tiling);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
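   /* Ask Mesa core to call FinishRenderTexture when rendering to this
    * renderbuffer ends; for this driver that flushes the batch (see
    * intel_finish_render_texture()).
    */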
   rb->NeedsFinishRenderTexture = true;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->NumSamples = num_samples;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server, which are created with intel_create_renderbuffer()) are most
 * similar in their handling to user-created renderbuffers, but they have a
 * resize handler that may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}

static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;

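   /* In the UMS and CMS multisample layouts, each logical array layer is
    * stored as num_samples consecutive slices in the miptree, so scale the
    * logical layer to the physical slice index.
    */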
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      irb->mt_layer = layer * mt->num_samples;
      break;

   default:
      irb->mt_layer = layer;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
      intel_miptree_alloc_hiz(brw, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

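/**
 * Compute where this renderbuffer's image lives within its miptree. A
 * miptree packs every level and layer into one buffer object, so rendering
 * into a single image requires the 2D x/y offset of that slice, which is
 * cached here in irb->draw_x/draw_y.
 */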
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory. This might be called
 * many times to choose different texture levels, cube faces, etc.
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fall back when drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);

   DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch. Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(brw);
}
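/* fbo_incomplete() marks the framebuffer unsupported and reports the reason
 * both through GL_KHR_debug output (when running in a debug context) and
 * through the DBG log. The static msg_id gives each expansion site its own
 * persistent message ID for _mesa_gl_debug() to fill in.
 */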
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   DBG("%s() on fb %p (%s)\n", __FUNCTION__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (brw->gen >= 7) {
         /* For gen >= 7, we are using the lod/minimum-array-element fields
          * and supporting layered rendering. This means that we must restrict
          * the depth & stencil attachments to match in various more
          * restrictive ways (width, height, depth, LOD and layer).
          */
         if (depth_mt->physical_width0 != stencil_mt->physical_width0 ||
             depth_mt->physical_height0 != stencil_mt->physical_height0 ||
             depth_mt->physical_depth0 != stencil_mt->physical_depth0 ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *    [DevSNB]: This field must be set to the same value (enabled
             *    or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using the hardware BLT engine.
 * We can do this for 1:1 copies: no scaling, mirroring, or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers. We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      GLint i;
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the bounds of the buffers, and there is no
       * scissor, then we can probably use the blit engine.
       */
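      /* Note that the size check is written as a difference comparison:
       * combined with srcX1 >= srcX0 and srcY1 >= srcY0, requiring
       * srcX0 - srcX1 == dstX0 - dstX1 (and likewise for Y) means both
       * rectangles have the same unmirrored extent.
       */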
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !ctx->Scissor.EnableFlags)) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers. We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare. Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure. "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}
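/**
 * \see dd_function_table::BlitFramebuffer
 *
 * Try BLORP first, then the BLT engine, and finally the shader-based meta
 * implementation for whichever buffers remain in the mask.
 */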
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   mask = brw_blorp_framebuffer(brw_context(ctx),
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   /* Try using the BLT engine. */
   mask = intel_blit_framebuffer_with_blitter(ctx,
                                              srcX0, srcY0, srcX1, srcY1,
                                              dstX0, dstY0, dstX1, dstY1,
                                              mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * This is a no-op except on multisample buffers shared with DRI2.
 */
void
intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
{
   if (irb->mt && irb->mt->singlesample_mt)
      irb->mt->need_downsample = true;
}

/**
 * Does the renderbuffer have HiZ enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
}

bool
intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(brw,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

void
intel_renderbuffer_att_set_needs_depth_resolve(struct gl_renderbuffer_attachment *att)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   if (irb->mt) {
      if (att->Layered) {
         intel_miptree_set_all_slices_need_depth_resolve(irb->mt, irb->mt_level);
      } else {
         intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                     irb->mt_level,
                                                     irb->mt_layer);
      }
   }
}

bool
intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(brw,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

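/**
 * Replace the renderbuffer's miptree with a freshly allocated one for the
 * same texture image. The old contents are copied into the new tree unless
 * the caller passes invalidate = true, in which case they are discarded.
 */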
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);

   new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 true,
                                 irb->mt->num_samples,
                                 INTEL_MIPTREE_TILING_ANY);

   if (brw_is_hiz_depth_format(brw, new_mt->format)) {
      intel_miptree_alloc_hiz(brw, new_mt);
   }

   intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);

   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   intel_miptree_release(&new_mt);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;

   dd->NewFramebuffer = intel_new_framebuffer;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->FinishRenderTexture = intel_finish_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
}