/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"

#define FILE_DEBUG_FLAG DEBUG_FBO
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

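/**
 * Return the intel_region backing the given renderbuffer attachment, or
 * NULL if the attachment has no miptree allocated.
 */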
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt)
      return irb->mt->region;
   else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(intel);

   /* For a window-system renderbuffer, we need to flip the mapping we
    * receive upside-down.  So we ask for the rectangle flipped vertically,
    * and then return a pointer to its bottom row with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}

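/**
 * Map a GL renderbuffer internalFormat to the mesa_format this driver will
 * actually allocate and render to.
 */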
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct intel_context *intel = intel_context(ctx);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                                   internalFormat,
                                                   GL_NONE, GL_NONE);

   case GL_DEPTH_COMPONENT16:
      return MESA_FORMAT_Z_UNORM16;
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_COMPONENT24:
   case GL_DEPTH_COMPONENT32:
      return MESA_FORMAT_Z24_UNORM_X8_UINT;
   case GL_DEPTH_STENCIL_EXT:
   case GL_DEPTH24_STENCIL8_EXT:
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      return MESA_FORMAT_Z24_UNORM_S8_UINT;
   }
}

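/**
 * Allocate (or reallocate) miptree storage for a renderbuffer whose
 * rb->Format has already been chosen.  Shared by user-created renderbuffers
 * and private window-system buffers.
 */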
static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height);
   if (!irb->mt)
      return false;

   return true;
}

/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}

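/**
 * Called via ctx->Driver.EGLImageTargetRenderbufferStorage: wrap an
 * EGLImage's buffer object in this renderbuffer's miptree.
 */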
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_R8G8B8A8_UNORM:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_bo(intel,
                                         image->region->bo,
                                         image->format,
                                         image->offset,
                                         image->region->width,
                                         image->region->height,
                                         image->region->pitch,
                                         image->region->tiling);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_renderbuffer()) are most similar in their
 * handling to user-created renderbuffers, but they have a resize handler that
 * may be called at intel_update_renderbuffers() time.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   _mesa_FramebufferRenderbuffer_sw(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

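/**
 * Point the renderbuffer wrapper at the given level/layer of the texture
 * image's miptree so the hardware can render directly into it.
 */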
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}

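/**
 * Record the x/y offset of the renderbuffer's 2D image within its miptree
 * in irb->draw_x/draw_y.
 */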
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);

   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);

   DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

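/**
 * Mark the framebuffer unsupported and log the reason, both via the GL
 * debug-output mechanism (in debug contexts) and the driver's DBG output.
 */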
#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_SOURCE_API,                                \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb)
      stencil_mt = stencilRb->mt;

   if (depth_mt && stencil_mt) {
      /* Make sure that the depth and stencil buffers are actually the same
       * slice of the same miptree, since we only support packed
       * depth/stencil.
       */
      if (depth_mt == stencil_mt) {
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         fbo_incomplete(fb, "FBO incomplete: separate stencil unsupported\n");
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using the hardware blit engine.
 * We can do this when the blit is 1:1 (no scaling or mirroring), the
 * rectangles lie within the buffers, and scissoring is disabled.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask, GLenum filter)
{
   struct intel_context *intel = intel_context(ctx);

   if (mask & GL_COLOR_BUFFER_BIT) {
      GLint i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !ctx->Scissor.EnableFlags)) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
                       "Falling back to software rendering.\n");
            return mask;
         }

         mesa_format src_format = _mesa_get_srgb_format_linear(src_rb->Format);
         mesa_format dst_format = _mesa_get_srgb_format_linear(dst_rb->Format);
         if (src_format != dst_format) {
            perf_debug("glBlitFramebuffer(): unsupported blit from %s to %s. "
                       "Falling back to software rendering.\n",
                       _mesa_get_format_name(src_format),
                       _mesa_get_format_name(dst_format));
            return mask;
         }

         if (!intel_miptree_blit(intel,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure. "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}

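/**
 * Driver hook for glBlitFramebuffer(): try the blitter path first, then let
 * the meta/swrast path handle whatever buffers remain.
 */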
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try using the BLT engine. */
   mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                              srcX0, srcY0, srcX1, srcY1,
                                              dstX0, dstY0, dstX1, dstY1,
                                              mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_and_swrast_BlitFramebuffer(ctx, readFb, drawFb,
                                         srcX0, srcY0, srcX1, srcY1,
                                         dstX0, dstY0, dstX1, dstY1,
                                         mask, filter);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
}