[mesa.git] / src / mesa / drivers / dri / intel / intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO

static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format);

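/**
 * Return true if the framebuffer's depth attachment has a HiZ miptree.
 */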
bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_mt;
}

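/**
 * Return the region backing a renderbuffer attachment, or NULL if it has no
 * miptree.  For separate stencil, this returns the region of the hidden
 * stencil miptree.
 */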
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt) {
      if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
         return irb->mt->stencil_mt->region;
      else
         return irb->mt->region;
   } else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* There is only drawable state in intel_framebuffer at this time, so just
    * use Mesa's class.
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   free(irb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   /* We are sometimes called on a renderbuffer with no miptree, e.g. by our
    * intel_span.c usage.
    */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle that is flipped
    * vertically, and then return a pointer to its bottom row with a negative
    * stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);

   intel_flush(ctx);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height);
   if (!irb->mt)
      return false;

   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }

   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   intel_flush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format);

/**
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format. In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*(). This just allocates a
 * ``struct intel_renderbuffer`` then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   rb = intel_new_renderbuffer(ctx, ~0);
   irb = intel_renderbuffer(rb);
   if (!irb)
      return NULL;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      free(irb);
      return NULL;
   }

   return irb;
}

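/**
 * Update the renderbuffer's draw_x/draw_y to the offset of its current
 * level/layer image within the miptree's region.
 */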
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   int cpp = region->cpp;
   uint32_t pitch = region->pitch * cpp;

   if (region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
              (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
              (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}

#ifndef I915
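/**
 * Return true if the hardware lacks surface tile offsets and the
 * renderbuffer's image does not start exactly on a tile boundary.
 */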
static bool
need_tile_offset_workaround(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}



/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt && stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         if (stencil_mt->format != MESA_FORMAT_S8)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
         DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit.  This will end
         up as a fast hardware blit on some drivers */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

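/**
 * \see dd_function_table::BlitFramebuffer
 *
 * Try the CopyTexSubImage fast path first; any buffers it could not handle
 * are blitted with the meta implementation.
 */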
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

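/**
 * Mark the renderbuffer's current miptree slice as needing a HiZ resolve.
 */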
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   }
}

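/**
 * Mark the renderbuffer's current miptree slice as needing a depth resolve.
 */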
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   }
}

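/**
 * Perform any pending HiZ resolve on the renderbuffer's miptree slice.
 * Returns false if the renderbuffer has no miptree.
 */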
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

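/**
 * Perform any pending depth resolve on the renderbuffer's miptree slice.
 * Returns false if the renderbuffer has no miptree.
 */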
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}