intel: Resolve buffers in intel_map_texture_image()
[mesa.git] src/mesa/drivers/dri/intel/intel_fbo.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO


bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_mt;
}

struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt)
      return irb->mt->region;
   else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);

   free(irb);
}

/**
 * \brief Map a renderbuffer through the GTT.
 *
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_gtt(struct gl_context *ctx,
                           struct gl_renderbuffer *rb,
                           GLuint x, GLuint y, GLuint w, GLuint h,
                           GLbitfield mode,
                           GLubyte **out_map,
                           GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLubyte *map;
   int stride, flip_stride;

   assert(irb->mt);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   stride = irb->mt->region->pitch * irb->mt->region->cpp;

   if (rb->Name == 0) {
      y = irb->mt->region->height - 1 - y;
      flip_stride = -stride;
   } else {
      x += irb->draw_x;
      y += irb->draw_y;
      flip_stride = stride;
   }

   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
      intel_batchbuffer_flush(intel);
   }

   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);

   map = irb->mt->region->bo->virtual;
   map += x * irb->mt->region->cpp;
   map += (int)y * stride;

   *out_map = map;
   *out_stride = flip_stride;

   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}
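
/* Worked example of the y-flip in intel_map_renderbuffer_gtt() above
 * (illustrative numbers, assuming a 640x480 window for concreteness): for
 * the window-system framebuffer (rb->Name == 0), mapping y = 10 produces a
 * pointer at region row 480 - 1 - 10 = 469 together with a negative
 * out_stride, so row i of the returned map is region row 469 - i. GL's
 * bottom-left origin is thus reconciled with the region's top-left origin
 * without copying any pixels.
 */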
/**
 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
 *
 * On gen6+, we have LLC sharing, which means we can get high-performance
 * access to linear-mapped buffers.
 *
 * This function allocates a temporary gem buffer at
 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
 * returns a map of that. (Note: Only X tiled buffers can be blitted).
 *
 * \see intel_renderbuffer::map_bo
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_blit(struct gl_context *ctx,
                            struct gl_renderbuffer *rb,
                            GLuint x, GLuint y, GLuint w, GLuint h,
                            GLbitfield mode,
                            GLubyte **out_map,
                            GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   int src_x, src_y;
   int dst_stride;

   assert(irb->mt->region);
   assert(intel->gen >= 6);
   assert(!(mode & GL_MAP_WRITE_BIT));
   assert(irb->mt->region->tiling == I915_TILING_X);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);

   if (rb->Name) {
      src_x = x + irb->draw_x;
      src_y = y + irb->draw_y;
   } else {
      src_x = x;
      src_y = irb->mt->region->height - y - h;
   }

   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
                                    dst_stride * h, 4096);

   /* We don't do the flip in the blit, because it's always so tricky to get
    * right.
    */
   if (irb->map_bo &&
       intelEmitCopyBlit(intel,
                         irb->mt->region->cpp,
                         irb->mt->region->pitch, irb->mt->region->bo,
                         0, irb->mt->region->tiling,
                         dst_stride / irb->mt->region->cpp, irb->map_bo,
                         0, I915_TILING_NONE,
                         src_x, src_y,
                         0, 0,
                         w, h,
                         GL_COPY)) {
      intel_batchbuffer_flush(intel);
      drm_intel_bo_map(irb->map_bo, false);

      if (rb->Name) {
         *out_map = irb->map_bo->virtual;
         *out_stride = dst_stride;
      } else {
         *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
         *out_stride = -dst_stride;
      }

      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
          __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
          src_x, src_y, w, h, *out_map, *out_stride);
   } else {
      /* Fallback to GTT mapping. */
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
      intel_map_renderbuffer_gtt(ctx, rb,
                                 x, y, w, h,
                                 mode,
                                 out_map, out_stride);
   }
}
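
/* Example of the dst_stride computation above (illustrative numbers): for a
 * 31-pixel-wide map of an 8-bpp buffer, ALIGN(31 * 1, 4) = 32, so every row
 * of the linear temporary starts on a 4-byte boundary and the bo is
 * allocated as 32 * h bytes; dst_stride / cpp is the destination pitch in
 * pixels handed to intelEmitCopyBlit().
 */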
/**
 * \brief Map a stencil renderbuffer.
 *
 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
 * the buffer in software.
 *
 * This function allocates a temporary malloc'd buffer at
 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
 * returns the temporary buffer as the map.
 *
 * \see intel_renderbuffer::map_buffer
 * \see intel_map_renderbuffer()
 * \see intel_unmap_renderbuffer_s8()
 */
static void
intel_map_renderbuffer_s8(struct gl_context *ctx,
                          struct gl_renderbuffer *rb,
                          GLuint x, GLuint y, GLuint w, GLuint h,
                          GLbitfield mode,
                          GLubyte **out_map,
                          GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   uint8_t *tiled_s8_map;
   uint8_t *untiled_s8_map;

   assert(rb->Format == MESA_FORMAT_S8);
   assert(irb->mt);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* Flip the Y axis for the default framebuffer. */
   int y_flip = (rb->Name == 0) ? -1 : 1;
   int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;

   irb->map_buffer = malloc(w * h);
   untiled_s8_map = irb->map_buffer;
   tiled_s8_map = intel_region_map(intel, irb->mt->region, mode);

   for (uint32_t pix_y = 0; pix_y < h; pix_y++) {
      for (uint32_t pix_x = 0; pix_x < w; pix_x++) {
         uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
         ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
                                            x + pix_x,
                                            flipped_y);
         untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset];
      }
   }

   *out_map = untiled_s8_map;
   *out_stride = w;

   DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}
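
/* Note on the map returned by intel_map_renderbuffer_s8() above: because
 * out_stride is w, the temporary buffer is a densely packed w x h image.
 * A caller can address pixel (px, py) of the mapped window simply as
 * map[py * stride + px]; intel_offset_S8() is the only place where the
 * swizzled W-tile layout is visible.
 */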
/**
 * \brief Map a depthstencil buffer with separate stencil.
 *
 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
 * renderbuffer and a hidden stencil renderbuffer. This function maps the
 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
 * returns that as the mapped pointer. The caller need not be aware of the
 * hidden stencil buffer and may safely assume that the mapped pointer points
 * to a MESA_FORMAT_S8_Z24 buffer.
 *
 * The consistency between the depth buffer's S8 bits and the hidden stencil
 * buffer is managed within intel_map_renderbuffer() and
 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
 * according to the map mode.
 *
 * \see intel_map_renderbuffer()
 * \see intel_unmap_renderbuffer_separate_s8z24()
 */
static void
intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx,
                                      struct gl_renderbuffer *rb,
                                      GLuint x, GLuint y, GLuint w, GLuint h,
                                      GLbitfield mode,
                                      GLubyte **out_map,
                                      GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   uint8_t *s8z24_map;
   int32_t s8z24_stride;

   struct intel_renderbuffer *s8_irb;
   uint8_t *s8_map;

   assert(rb->Name != 0);
   assert(rb->Format == MESA_FORMAT_S8_Z24);
   assert(irb->wrapped_depth != NULL);
   assert(irb->wrapped_stencil != NULL);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* Map with write mode for the gather below. */
   intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth,
                              x, y, w, h, mode | GL_MAP_WRITE_BIT,
                              &s8z24_map, &s8z24_stride);

   s8_irb = intel_renderbuffer(irb->wrapped_stencil);
   s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT);

   /* Gather the stencil buffer into the depth buffer. */
   for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
      for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
         ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
                                               x + pix_x,
                                               y + pix_y);
         ptrdiff_t s8z24_offset = pix_y * s8z24_stride
                                + pix_x * 4
                                + 3;
         s8z24_map[s8z24_offset] = s8_map[s8_offset];
      }
   }

   intel_region_unmap(intel, s8_irb->mt->region);

   *out_map = s8z24_map;
   *out_stride = s8z24_stride;
}
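
/* Why the "+ 3" in the gather above: MESA_FORMAT_S8_Z24 packs depth in bits
 * 0-23 and stencil in bits 24-31 of each 32-bit pixel, so on a little-endian
 * CPU the stencil lives in byte 3 of every 4-byte pixel. The gather
 * therefore writes pixel (pix_x, pix_y)'s stencil value to byte
 * pix_y * s8z24_stride + pix_x * 4 + 3 of the mapped depth buffer.
 */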
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt && !irb->wrapped_depth) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   if (rb->Format == MESA_FORMAT_S8) {
      intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode,
                                out_map, out_stride);
   } else if (irb->wrapped_depth) {
      intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode,
                                            out_map, out_stride);
   } else if (intel->gen >= 6 &&
              !(mode & GL_MAP_WRITE_BIT) &&
              irb->mt->region->tiling == I915_TILING_X) {
      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
                                  out_map, out_stride);
   } else {
      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
                                 out_map, out_stride);
   }
}

/**
 * \see intel_map_renderbuffer_s8()
 */
static void
intel_unmap_renderbuffer_s8(struct gl_context *ctx,
                            struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   assert(rb->Format == MESA_FORMAT_S8);

   if (!irb->map_buffer)
      return;

   if (irb->map_mode & GL_MAP_WRITE_BIT) {
      /* The temporary buffer was written to, so we must copy its pixels into
       * the real buffer.
       */
      uint8_t *untiled_s8_map = irb->map_buffer;
      uint8_t *tiled_s8_map = irb->mt->region->bo->virtual;

      /* Flip the Y axis for the default framebuffer. */
      int y_flip = (rb->Name == 0) ? -1 : 1;
      int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;

      for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) {
         for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) {
            uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias;
            ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
                                               pix_x + irb->map_x,
                                               flipped_y);
            tiled_s8_map[offset] =
               untiled_s8_map[pix_y * irb->map_w + pix_x];
         }
      }
   }

   intel_region_unmap(intel, irb->mt->region);
   free(irb->map_buffer);
   irb->map_buffer = NULL;
}

/**
 * \brief Unmap a depthstencil renderbuffer with separate stencil.
 *
 * \see intel_map_renderbuffer_separate_s8z24()
 * \see intel_unmap_renderbuffer()
 */
static void
intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_renderbuffer *s8z24_irb;

   assert(rb->Name != 0);
   assert(rb->Format == MESA_FORMAT_S8_Z24);
   assert(irb->wrapped_depth != NULL);
   assert(irb->wrapped_stencil != NULL);

   s8z24_irb = intel_renderbuffer(irb->wrapped_depth);

   if (irb->map_mode & GL_MAP_WRITE_BIT) {
      /* Copy the stencil bits from the depth buffer into the stencil buffer.
       */
      uint32_t map_x = irb->map_x;
      uint32_t map_y = irb->map_y;
      uint32_t map_w = irb->map_w;
      uint32_t map_h = irb->map_h;

      struct intel_renderbuffer *s8_irb;
      uint8_t *s8_map;

      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
      s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT);

      int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch;
      uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual
                         + map_y * s8z24_stride
                         + map_x * 4;

      for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) {
         for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) {
            ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
                                                  map_x + pix_x,
                                                  map_y + pix_y);
            ptrdiff_t s8z24_offset = pix_y * s8z24_stride
                                   + pix_x * 4
                                   + 3;
            s8_map[s8_offset] = s8z24_map[s8z24_offset];
         }
      }

      intel_region_unmap(intel, s8_irb->mt->region);
   }

   drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo);
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (rb->Format == MESA_FORMAT_S8) {
      intel_unmap_renderbuffer_s8(ctx, rb);
   } else if (irb->wrapped_depth) {
      intel_unmap_renderbuffer_separate_s8z24(ctx, rb);
   } else if (irb->map_bo) {
      /* Paired with intel_map_renderbuffer_blit(). */
      drm_intel_bo_unmap(irb->map_bo);
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
   } else {
      /* Paired with intel_map_renderbuffer_gtt(). */
      if (irb->mt) {
         /* The miptree may be null when intel_map_renderbuffer() is
          * called from intel_span.c.
          */
         drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
      }
   }
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int cpp, tiling;

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   cpp = _mesa_get_format_bytes(rb->Format);

   intel_flush(ctx);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   tiling = I915_TILING_NONE;
   if (intel->use_texture_tiling) {
      GLenum base_format = _mesa_get_format_base_format(rb->Format);

      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
                              base_format == GL_STENCIL_INDEX ||
                              base_format == GL_DEPTH_STENCIL))
         tiling = I915_TILING_Y;
      else
         tiling = I915_TILING_X;
   }

   if (irb->Base.Format == MESA_FORMAT_S8) {
      /*
       * The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements. From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
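      /* Concrete example of the hack (illustrative numbers): for a 100x100
       * S8 renderbuffer, the miptree below is created with cpp = 2,
       * width = ALIGN(100, 64) = 128, and
       * height = ALIGN((100 + 1) / 2, 64) = 64, so each allocated row holds
       * two interleaved stencil rows and the pitch comes out doubled, as
       * the hardware expects.
       */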
      irb->mt = intel_miptree_create_for_renderbuffer(
                  intel,
                  rb->Format,
                  I915_TILING_NONE,
                  cpp * 2,
                  ALIGN(width, 64),
                  ALIGN((height + 1) / 2, 64));
      if (!irb->mt)
         return false;

   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
              && intel->must_use_separate_stencil) {

      bool ok = true;
      struct gl_renderbuffer *depth_rb;
      struct gl_renderbuffer *stencil_rb;

      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
                                                   MESA_FORMAT_X8_Z24);
      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
                                                     MESA_FORMAT_S8);
      ok = depth_rb && stencil_rb;
      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
                                                  depth_rb->InternalFormat,
                                                  width, height);
      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
                                                  stencil_rb->InternalFormat,
                                                  width, height);

      if (!ok) {
         if (depth_rb) {
            intel_delete_renderbuffer(depth_rb);
         }
         if (stencil_rb) {
            intel_delete_renderbuffer(stencil_rb);
         }
         return false;
      }

      depth_rb->Wrapped = rb;
      stencil_rb->Wrapped = rb;
      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);

   } else {
      irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                      tiling, cpp,
                                                      width, height);
      if (!irb->mt)
         return false;

      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
         bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
         if (!ok) {
            intel_miptree_release(&irb->mt);
            return false;
         }
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   return irb;
}


struct gl_renderbuffer*
intel_create_wrapped_renderbuffer(struct gl_context * ctx,
                                  int width, int height,
                                  gl_format format)
{
   /*
    * The name here is irrelevant, as long as it's nonzero, because the
    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
    */
   GLuint name = ~0;

   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   struct gl_renderbuffer *rb = &irb->Base;
   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->DataType = intel_mesa_format_to_rb_datatype(format);
   rb->Width = width;
   rb->Height = height;

   return rb;
}


/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   intel_flush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format);

/**
 * \par Special case for separate stencil
 *
 * When wrapping a depthstencil texture that uses separate stencil, this
 * function is recursively called twice: once to create \c
 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
 * call to create \c irb->wrapped_depth, the \c format and \c
 * internal_format parameters do not match \c mt->format. In that case, \c
 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 * MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   rb->Format = format;
   if (!intel_span_supports_format(rb->Format)) {
      DBG("Render to texture BAD FORMAT %s\n",
          _mesa_get_format_name(rb->Format));
      return false;
   } else {
      DBG("Render to texture %s\n", _mesa_get_format_name(rb->Format));
   }

   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
      assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL));

      struct intel_renderbuffer *depth_irb;
      struct intel_renderbuffer *stencil_irb;

      if (!irb->wrapped_depth) {
         depth_irb = intel_renderbuffer_wrap_miptree(intel,
                                                     mt, level, layer,
                                                     MESA_FORMAT_X8_Z24,
                                                     GL_DEPTH_COMPONENT24);
         stencil_irb = intel_renderbuffer_wrap_miptree(intel,
                                                       mt->stencil_mt,
                                                       level, layer,
                                                       MESA_FORMAT_S8,
                                                       GL_STENCIL_INDEX8);
         _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);
         _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base);

         if (!irb->wrapped_depth || !irb->wrapped_stencil)
            return false;
      } else {
         bool ok = true;

         depth_irb = intel_renderbuffer(irb->wrapped_depth);
         stencil_irb = intel_renderbuffer(irb->wrapped_stencil);

         ok &= intel_renderbuffer_update_wrapper(intel,
                                                 depth_irb,
                                                 mt,
                                                 level, layer,
                                                 MESA_FORMAT_X8_Z24,
                                                 GL_DEPTH_COMPONENT24);
         ok &= intel_renderbuffer_update_wrapper(intel,
                                                 stencil_irb,
                                                 mt->stencil_mt,
                                                 level, layer,
                                                 MESA_FORMAT_S8,
                                                 GL_STENCIL_INDEX8);
         if (!ok)
            return false;
      }
   } else {
      intel_miptree_reference(&irb->mt, mt);
      intel_renderbuffer_set_draw_offset(irb);
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*(). This just allocates a
 * \c struct intel_renderbuffer then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)

{
   const GLuint name = ~0; /* not significant, but distinct for debugging */
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      free(irb);
      return NULL;
   }

   return irb;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary. We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   int cpp = region->cpp;
   uint32_t pitch = region->pitch * cpp;

   if (region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
              (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
              (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}
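
/* Worked example for the X-tiled case above (illustrative numbers): with
 * cpp = 4, an X tile is 512 bytes = 128 pixels wide and 8 rows tall. For
 * draw_x = 100, draw_y = 20:
 *    tile_x = 100 % 128 = 100
 *    tile_y = 20 % 8 = 4
 *    offset = (20 / 8) * (8 * pitch) + ((100 - 100) / 128) * 4096
 *           = 16 * pitch
 * i.e. the returned base points at the start of the third 8-row band of
 * tiles, and the residual (100, 4) offset is reported through tile_x and
 * tile_y for the surface state to apply.
 */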

#ifndef I915
static bool
need_tile_offset_workaround(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory. This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image. So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render texture tid %lx tex=%u\n",
       _glthread_GetID(), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch. Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   int i;

   /*
    * Check whether the depth and stencil renderbuffers are the same
    * renderbuffer or wrap the same texture.
    */
   if (depthRb && stencilRb) {
      bool depth_stencil_are_same;
      if (depthRb == stencilRb)
         depth_stencil_are_same = true;
      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
               (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
               (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
                fb->Attachment[BUFFER_STENCIL].Texture->Name))
         depth_stencil_are_same = true;
      else
         depth_stencil_are_same = false;

      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel_span_supports_format(irb->Base.Format) ||
          !intel->vtbl.render_target_supported(irb->Base.Format)) {
         DBG("Unsupported texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D.
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture, and there is no scissor, then we can use
         glCopyTexSubImage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers. */
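      /* Example of the size/orientation check below (illustrative
       * coordinates): blitting src (0, 0)-(64, 64) to dst (16, 16)-(80, 80)
       * qualifies, because the signed extents match: 0 - 64 == 16 - 80 on
       * both axes. A mirrored blit such as src (0, 0)-(64, 64) to
       * dst (80, 16)-(16, 80) fails the X comparison (-64 != 64), and any
       * scaled blit fails because the extents differ in magnitude.
       */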
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_hiz_resolve(
         intel_renderbuffer(irb->wrapped_depth));
   }
}

void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_depth_resolve(
         intel_renderbuffer(irb->wrapped_depth));
   }
}

bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);
   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_hiz(intel,
                                            intel_renderbuffer(irb->wrapped_depth));

   return false;
}

bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_depth(intel,
                                              intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}