src/mesa/drivers/dri/intel/intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO

static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format);

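/**
 * Check whether the framebuffer's depth attachment (if any) is backed by a
 * miptree that has an associated HiZ buffer.
 */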
bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_mt;
}

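/**
 * Return the intel_region backing the given framebuffer attachment.  For a
 * stencil attachment whose miptree stores stencil separately, the separate
 * stencil miptree's region is returned instead.
 */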
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt) {
      if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
         return irb->mt->stencil_mt->region;
      else
         return irb->mt->region;
   } else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   free(irb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   /* We sometimes get called on a renderbuffer with no miptree by our
    * intel_span.c usage.
    */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the returned mapping
    * upside-down.  So we ask for a rectangle that is flipped vertically, and
    * then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height);
   if (!irb->mt)
      return false;

   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format);

/**
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format.  In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*().  This just allocates a
 * \c struct intel_renderbuffer then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)

{
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   rb = intel_new_renderbuffer(ctx, ~0);
   irb = intel_renderbuffer(rb);
   if (!irb)
      return NULL;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      free(irb);
      return NULL;
   }

   return irb;
}

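/**
 * Compute the x/y pixel offset of irb's mip level/layer within its region
 * and cache it in irb->draw_x/draw_y for later use when setting up
 * rendering to this renderbuffer.
 */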
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
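/* Worked example (hypothetical numbers, not taken from any real surface):
 * for an X-tiled region with cpp = 4 and pitch = 1024 pixels (4096 bytes),
 * a draw offset of (300, 20) gives tile_x = 300 % 128 = 44,
 * tile_y = 20 % 8 = 4, and a base offset of
 * (20 / 8) * (8 * 4096) + ((300 - 44) / 128) * 4096 = 65536 + 8192 = 73728
 * bytes, which is 4096-byte (tile/page) aligned as required.
 */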
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   int cpp = region->cpp;
   uint32_t pitch = region->pitch * cpp;

   if (region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
              (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
              (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}

#ifndef I915
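/**
 * Returns true if the hardware cannot apply a surface x/y tile offset
 * (!brw->has_surface_tile_offset) and the renderbuffer's draw offset is not
 * already tile-aligned, in which case intel_render_texture() falls back to
 * rendering into a temporary single-level miptree.
 */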
static bool
need_tile_offset_workaround(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         if (stencil_mt->format != MESA_FORMAT_S8)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either from
       * being a Renderbuffer or from being a texture that got wrapped by
       * intel_renderbuffer_wrap_miptree().
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
         DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no mirroring,
       * the rectangles lie within the bounds of the textures, and there is
       * no scissor, then we can use glCopyTexSubImage2D to implement the
       * blit.  This will end up as a fast hardware blit on some drivers.
       */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

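/**
 * Flag irb's miptree slice (mt_level/mt_layer) as needing a HiZ resolve.
 * No-op if the renderbuffer is not backed by a miptree.
 */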
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   }
}

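/**
 * Flag irb's miptree slice (mt_level/mt_layer) as needing a depth resolve.
 * No-op if the renderbuffer is not backed by a miptree.
 */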
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   }
}

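/**
 * Perform any pending HiZ resolve on irb's miptree slice.
 *
 * \return false if the renderbuffer has no miptree; otherwise the result of
 *         intel_miptree_slice_resolve_hiz().
 */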
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

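/**
 * Perform any pending depth resolve on irb's miptree slice.
 *
 * \return false if the renderbuffer has no miptree; otherwise the result of
 *         intel_miptree_slice_resolve_depth().
 */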
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}
982 }