/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO

static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format);

bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_mt;
}

struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt) {
      if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
         return irb->mt->stencil_mt->region;
      else
         return irb->mt->region;
   } else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   free(irb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (!irb && rb->Data) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = rb->RowStride * bpp;
      *out_map = (GLubyte *) rb->Data + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called from intel_span.c with a renderbuffer that
    * has no miptree.
    */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for a rectangle that is flipped vertically, and
    * then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
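
/* Worked example (illustrative, not part of the original driver): mapping an
 * 8-row region of a window-system renderbuffer (rb->Name == 0) whose miptree
 * map comes back with stride = 256 bytes.  The code above advances map by
 * (8 - 1) * 256 bytes to the start of the last row and negates the stride,
 * so the caller sees row 0 at the bottom of the requested rectangle and
 * walks upward through memory, undoing the y-flip of window-system buffers.
 */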

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (!irb && rb->Data) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height);
   if (!irb->mt)
      return false;

   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
286 "glEGLImageTargetRenderbufferStorage(unsupported image format");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format);

/**
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format. In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*(). This just allocates a
 * ``struct intel_renderbuffer`` then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   rb = intel_new_renderbuffer(ctx, ~0);
   irb = intel_renderbuffer(rb);
   if (!irb)
      return NULL;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      free(irb);
      return NULL;
   }

   return irb;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   int cpp = region->cpp;
   uint32_t pitch = region->pitch * cpp;

   if (region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
              (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
              (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}
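
/* Worked example (illustrative, not from the original source): with X tiling,
 * cpp = 4 and pitch = 4096 bytes, a slice at draw_x = 300, draw_y = 20 yields
 * tile_x = 300 % 128 = 44 and tile_y = 20 % 8 = 4.  The returned base offset
 * is (20 / 8) * (8 * 4096) + ((300 - 44) / 128) * 4096 = 65536 + 8192 bytes,
 * a 4KB-aligned address as tiled surface bases require; the remaining 44x4
 * pixel offset is reported back through tile_x/tile_y.
 */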

#ifndef I915
static bool
need_tile_offset_workaround(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually set up your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         if (stencil_mt->format != MESA_FORMAT_S8)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             * [DevSNB]: This field must be set to the same value (enabled
             * or disabled) as Hierarchical Depth Buffer Enable.
             */
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
         }
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_renderbuffer_wrap_miptree() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
         DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}
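
/* Illustrative note (not from the original source): if a packed
 * GL_DEPTH24_STENCIL8 texture is attached with the depth attachment at
 * miptree level 1 but the stencil attachment at level 0, the level/layer
 * comparison above marks the framebuffer GL_FRAMEBUFFER_UNSUPPORTED, since
 * a single packed depth/stencil state packet cannot describe two different
 * slices.
 */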

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
      struct intel_renderbuffer *srcRb =
         intel_renderbuffer(readFb->_ColorReadBuffer);

      /* If the source and destination are the same size with no mirroring,
       * the rectangles fit within the bounds of the read and draw
       * framebuffers, and there is no scissor, then we can use
       * glCopyTexSubImage2D to implement the blit.  This will end up as a
       * fast hardware blit on some drivers.
       */
      if (srcRb && drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcRb,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}
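
/* Illustrative example (not from the original source): assuming both
 * framebuffers are at least 80x80, a blit of (0, 0)-(64, 64) to
 * (16, 16)-(80, 80) with no scissor passes the checks above (equal deltas,
 * unflipped, in bounds) and is handled by intel_copy_texsubimage(), which
 * clears GL_COLOR_BUFFER_BIT from the mask.  A scaled blit to
 * (0, 0)-(128, 128) or a y-mirrored blit to (16, 80)-(80, 16) fails the
 * delta checks and falls through to _mesa_meta_BlitFramebuffer().
 */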

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   }
}

void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   }
}

bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}