i965: Expose GLSL 1.30 on gen6+.
[mesa.git] src/mesa/drivers/dri/intel/intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO


/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_region_release(&irb->region);
   intel_region_release(&irb->hiz_region);

   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);

   free(irb);
}

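/**
 * Map a renderbuffer for CPU access.
 *
 * Three paths, matching the code below: W-tiled stencil (MESA_FORMAT_S8)
 * buffers are detiled into a malloc'ed temporary; on gen6+, read-only maps
 * of X-tiled buffers are blitted into a linear temporary BO; everything
 * else is mapped directly through the GTT.  Window-system buffers
 * (rb->Name == 0) are additionally flipped in Y by returning a negative
 * stride.
 */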
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLubyte *map;
   int stride, flip_stride;

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->region) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   stride = irb->region->pitch * irb->region->cpp;

   if (rb->Format == MESA_FORMAT_S8) {
      GLuint pix_x, pix_y;
      uint8_t *tiled_s8_map, *untiled_s8_map;

      /* Flip the Y axis for the default framebuffer. */
      int y_flip = (rb->Name == 0) ? -1 : 1;
      int y_bias = (rb->Name == 0) ? (2 * irb->region->height - 1) : 0;

      /* Perform W-tile deswizzling for stencil buffers into a temporary. */
      stride = w;
      irb->map_buffer = malloc(stride * h);
      untiled_s8_map = irb->map_buffer;

      tiled_s8_map = intel_region_map(intel, irb->region, mode);

      for (pix_y = 0; pix_y < h; pix_y++) {
         for (pix_x = 0; pix_x < w; pix_x++) {
            GLuint flipped_y = y_flip * (y + pix_y) + y_bias;
            intptr_t offset = intel_offset_S8(irb->region->pitch,
                                              x + pix_x,
                                              flipped_y);

            untiled_s8_map[pix_y * stride + pix_x] = tiled_s8_map[offset];
         }
      }
      *out_map = untiled_s8_map;
      *out_stride = stride;

      DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
          __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
          x, y, w, h, *out_map, *out_stride);

      return;
   } else if (intel->gen >= 6 &&
              !(mode & GL_MAP_WRITE_BIT) &&
              irb->region->tiling == I915_TILING_X) {
      int dst_stride = ALIGN(w * irb->region->cpp, 4);
      int src_x, src_y;

      /* On gen6+, we have LLC sharing, which means we can get high-performance
       * access to linear-mapped buffers.  So, blit out a tiled buffer (if
       * possible, which it isn't really for Y tiling) to a temporary BO and
       * return a map of that.
       */

      if (rb->Name) {
         src_x = x + irb->draw_x;
         src_y = y + irb->draw_y;
      } else {
         src_x = x;
         src_y = irb->region->height - y - h;
      }

      irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
                                       dst_stride * h, 4096);

      /* We don't do the flip in the blit, because it's always so tricky to get
       * right.
       */
      if (irb->map_bo &&
          intelEmitCopyBlit(intel,
                            irb->region->cpp,
                            irb->region->pitch, irb->region->bo,
                            0, irb->region->tiling,
                            dst_stride / irb->region->cpp, irb->map_bo,
                            0, I915_TILING_NONE,
                            src_x, src_y,
                            0, 0,
                            w, h,
                            GL_COPY)) {
         intel_batchbuffer_flush(intel);
         drm_intel_bo_map(irb->map_bo, false);

         if (rb->Name) {
            *out_map = irb->map_bo->virtual;
            *out_stride = dst_stride;
         } else {
            *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
            *out_stride = -dst_stride;
         }

         DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
             __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
             src_x, src_y, w, h, *out_map, *out_stride);

         return;
      } else {
         drm_intel_bo_unreference(irb->map_bo);
         irb->map_bo = NULL;
      }
   }

   map = intel_region_map(intel, irb->region, mode);

   if (rb->Name == 0) {
      y = irb->region->height - 1 - y;
      flip_stride = -stride;
   } else {
      x += irb->draw_x;
      y += irb->draw_y;
      flip_stride = stride;
   }

   if (drm_intel_bo_references(intel->batch.bo, irb->region->bo)) {
      intel_batchbuffer_flush(intel);
   }

   drm_intel_gem_bo_map_gtt(irb->region->bo);

   map = irb->region->bo->virtual;
   map += x * irb->region->cpp;
   map += (int)y * stride;

   *out_map = map;
   *out_stride = flip_stride;

   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}

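/**
 * Unmap a renderbuffer mapped by intel_map_renderbuffer().
 *
 * If the map used the stencil detiling temporary and was writable, the
 * temporary is swizzled back into the W-tiled region before being freed.
 * Blit-based maps just drop their temporary BO; direct maps simply unmap
 * the region.
 */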
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (irb->map_buffer) {
      if (irb->map_mode & GL_MAP_WRITE_BIT) {
         GLuint pix_x, pix_y;
         uint8_t *tiled_s8_map = irb->region->bo->virtual;
         uint8_t *untiled_s8_map = irb->map_buffer;

         /* Flip the Y axis for the default framebuffer. */
         int y_flip = (rb->Name == 0) ? -1 : 1;
         int y_bias = (rb->Name == 0) ? (2 * irb->region->height - 1) : 0;

         /* Perform W-tile swizzling back out of the temporary. */
         for (pix_y = 0; pix_y < irb->map_h; pix_y++) {
            for (pix_x = 0; pix_x < irb->map_w; pix_x++) {
               GLuint flipped_y = y_flip * (pix_y + irb->map_y) + y_bias;
               intptr_t offset = intel_offset_S8(irb->region->pitch,
                                                 pix_x + irb->map_x,
                                                 flipped_y);

               tiled_s8_map[offset] =
                  untiled_s8_map[pix_y * irb->map_w + pix_x];
            }
         }
      }

      intel_region_unmap(intel, irb->region);
      free(irb->map_buffer);
      irb->map_buffer = NULL;
   } else if (irb->map_bo) {
      drm_intel_bo_unmap(irb->map_bo);
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
   } else {
      if (irb->region)
         intel_region_unmap(intel, irb->region);
   }
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int cpp, tiling;

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   cpp = _mesa_get_format_bytes(rb->Format);

   intel_flush(ctx);

   /* free old region */
   if (irb->region) {
      intel_region_release(&irb->region);
   }
   if (irb->hiz_region) {
      intel_region_release(&irb->hiz_region);
   }

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   tiling = I915_TILING_NONE;
   if (intel->use_texture_tiling) {
      GLenum base_format = _mesa_get_format_base_format(rb->Format);

      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
                              base_format == GL_STENCIL_INDEX ||
                              base_format == GL_DEPTH_STENCIL))
         tiling = I915_TILING_Y;
      else
         tiling = I915_TILING_X;
   }

   if (irb->Base.Format == MESA_FORMAT_S8) {
      /*
       * The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
      irb->region = intel_region_alloc(intel->intelScreen,
                                       I915_TILING_NONE,
                                       cpp * 2,
                                       ALIGN(width, 64),
                                       ALIGN((height + 1) / 2, 64),
                                       true);
      if (!irb->region)
         return false;

   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
              && intel->must_use_separate_stencil) {

      bool ok = true;
      struct gl_renderbuffer *depth_rb;
      struct gl_renderbuffer *stencil_rb;

      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
                                                   MESA_FORMAT_X8_Z24);
      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
                                                     MESA_FORMAT_S8);
      ok = depth_rb && stencil_rb;
      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
                                                  depth_rb->InternalFormat,
                                                  width, height);
      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
                                                  stencil_rb->InternalFormat,
                                                  width, height);

      if (!ok) {
         if (depth_rb) {
            intel_delete_renderbuffer(depth_rb);
         }
         if (stencil_rb) {
            intel_delete_renderbuffer(stencil_rb);
         }
         return false;
      }

      depth_rb->Wrapped = rb;
      stencil_rb->Wrapped = rb;
      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);

   } else {
      irb->region = intel_region_alloc(intel->intelScreen, tiling, cpp,
                                       width, height, true);
      if (!irb->region)
         return false;

      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
         irb->hiz_region = intel_region_alloc(intel->intelScreen,
                                              I915_TILING_Y,
                                              irb->region->cpp,
                                              irb->region->width,
                                              irb->region->height,
                                              true);
         if (!irb->hiz_region) {
            intel_region_release(&irb->region);
            return false;
         }
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
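/**
 * Called via ctx->Driver.EGLImageTargetRenderbufferStorage(), i.e. from
 * glEGLImageTargetRenderbufferStorageOES(): use the region backing the
 * given EGLImage as this renderbuffer's storage.
 */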
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
472 "glEGLImageTargetRenderbufferStorage(unsupported image format");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_region_reference(&irb->region, image->region);

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


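/**
 * Called via ctx->Driver.ResizeBuffers() when a window is resized: update
 * the framebuffer's size and reallocate storage for any window-system
 * renderbuffer whose size no longer matches.
 */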
static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   return irb;
}


struct gl_renderbuffer*
intel_create_wrapped_renderbuffer(struct gl_context * ctx,
                                  int width, int height,
                                  gl_format format)
{
   /*
    * The name here is irrelevant, as long as it's nonzero, because the
    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
    */
   GLuint name = ~0;

   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   struct gl_renderbuffer *rb = &irb->Base;
   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->DataType = intel_mesa_format_to_rb_datatype(format);
   rb->Width = width;
   rb->Height = height;

   return rb;
}


/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   intel_flush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

static bool
intel_update_tex_wrapper_regions(struct intel_context *intel,
                                 struct intel_renderbuffer *irb,
                                 struct intel_texture_image *intel_image);

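/**
 * Update the renderbuffer wrapper so that it mirrors the texture image's
 * format, size and storage.  Returns false if the format can't be rendered
 * with the span code or the backing regions couldn't be set up.
 */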
static bool
intel_update_wrapper(struct gl_context *ctx, struct intel_renderbuffer *irb,
                     struct gl_texture_image *texImage)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *intel_image = intel_texture_image(texImage);
   int width, height, depth;

   if (!intel_span_supports_format(texImage->TexFormat)) {
      DBG("Render to texture BAD FORMAT %s\n",
          _mesa_get_format_name(texImage->TexFormat));
      return false;
   } else {
      DBG("Render to texture %s\n", _mesa_get_format_name(texImage->TexFormat));
   }

   intel_miptree_get_dimensions_for_image(texImage, &width, &height, &depth);

   irb->Base.Format = texImage->TexFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(texImage->TexFormat);
   irb->Base.InternalFormat = texImage->InternalFormat;
   irb->Base._BaseFormat = _mesa_base_tex_format(ctx, irb->Base.InternalFormat);
   irb->Base.Width = width;
   irb->Base.Height = height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   if (intel_image->stencil_rb) {
      /* The tex image has packed depth/stencil format, but is using separate
       * stencil. */

      bool ok;
      struct intel_renderbuffer *depth_irb =
         intel_renderbuffer(intel_image->depth_rb);

      /* Update the hiz region if necessary. */
      ok = intel_update_tex_wrapper_regions(intel, depth_irb, intel_image);
      if (!ok) {
         return false;
      }

      /* The tex image shares its embedded depth and stencil renderbuffers with
       * the renderbuffer wrapper. */
      _mesa_reference_renderbuffer(&irb->wrapped_depth,
                                   intel_image->depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil,
                                   intel_image->stencil_rb);

      return true;
   } else {
      return intel_update_tex_wrapper_regions(intel, irb, intel_image);
   }
}

/**
 * FIXME: The handling of the hiz region is broken for mipmapped depth textures
 * FIXME: because intel_finalize_mipmap_tree is unaware of it.
 */
static bool
intel_update_tex_wrapper_regions(struct intel_context *intel,
                                 struct intel_renderbuffer *irb,
                                 struct intel_texture_image *intel_image)
{
   struct gl_renderbuffer *rb = &irb->Base;

   /* Point the renderbuffer's region to the texture's region. */
   if (irb->region != intel_image->mt->region) {
      intel_region_reference(&irb->region, intel_image->mt->region);
   }

   /* Allocate the texture's hiz region if necessary. */
   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)
       && !intel_image->mt->hiz_region) {
      intel_image->mt->hiz_region =
         intel_region_alloc(intel->intelScreen,
                            I915_TILING_Y,
                            _mesa_get_format_bytes(rb->Format),
                            rb->Width,
                            rb->Height,
                            true);
      if (!intel_image->mt->hiz_region)
         return false;
   }

   /* Point the renderbuffer's hiz region to the texture's hiz region. */
   if (irb->hiz_region != intel_image->mt->hiz_region) {
      intel_region_reference(&irb->hiz_region, intel_image->mt->hiz_region);
   }

   return true;
}


/**
 * When glFramebufferTexture[123]D is called this function sets up the
 * gl_renderbuffer wrapper around the texture image.
 * This will have the region info needed for hardware rendering.
 */
static struct intel_renderbuffer *
intel_wrap_texture(struct gl_context * ctx, struct gl_texture_image *texImage)
{
   const GLuint name = ~0;   /* not significant, but distinct for debugging */
   struct intel_renderbuffer *irb;

   /* make an intel_renderbuffer to wrap the texture image */
   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   if (!intel_update_wrapper(ctx, irb, texImage)) {
      free(irb);
      return NULL;
   }

   return irb;
}
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb,
                                   struct intel_texture_image *intel_image,
                                   int zoffset)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(intel_image->mt,
                                  intel_image->base.Base.Level,
                                  intel_image->base.Base.Face,
                                  zoffset,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
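 *
 * As a worked example (using the constants in the code below: 4096-byte
 * tiles spanning 512 bytes x 8 rows for X tiling and 128 bytes x 32 rows
 * for Y tiling): with X tiling, cpp = 4, draw_x = 130 and draw_y = 10,
 * we get *tile_x = 130 % 128 = 2, *tile_y = 10 % 8 = 2, and a base offset
 * of (10 / 8) * 8 * pitch + (130 - 2) / 128 * 4096 = 8 * pitch + 4096.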
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   int cpp = irb->region->cpp;
   uint32_t pitch = irb->region->pitch * cpp;

   if (irb->region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (irb->region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
              (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(irb->region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
              (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}

#ifndef I915
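/**
 * Returns true if rendering to this renderbuffer needs the single-level
 * miptree copy workaround in intel_render_texture(): the hardware lacks
 * surface tile offset support (original gen4) and the draw offset is not
 * tile-aligned.
 */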
static bool
need_tile_offset_workaround(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);

   (void) fb;

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_wrap_texture(ctx, image);
      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_update_wrapper(ctx, irb, image)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);

      intel_region_reference(&irb->region, intel_image->mt->region);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render texture tid %lx tex=%u\n",
       _glthread_GetID(), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   int i;

   /*
    * The depth and stencil renderbuffers are the same renderbuffer or wrap
    * the same texture.
    */
   if (depthRb && stencilRb) {
      bool depth_stencil_are_same;
      if (depthRb == stencilRb)
         depth_stencil_are_same = true;
      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
               (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
               (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
                fb->Attachment[BUFFER_STENCIL].Texture->Name))
         depth_stencil_are_same = true;
      else
         depth_stencil_are_same = false;

      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel_span_supports_format(irb->Base.Format) ||
          !intel->vtbl.render_target_supported(irb->Base.Format)) {
         DBG("Unsupported texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor, then we can use
         glCopyTexSubImage2D to implement the blit.  This will end
         up as a fast hardware blit on some drivers. */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

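/**
 * Driver hook for glBlitFramebuffer(): try the CopyTexSubImage fast path
 * first, then hand any remaining buffers to the meta implementation.
 */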
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}