i965: Use GTT maps when available to upload vertex arrays and system VBOs.
[mesa.git] / src / mesa / drivers / dri / intel / intel_span.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/glheader.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/colormac.h"

#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_screen.h"
#include "intel_span.h"
#include "intel_regions.h"
#include "intel_tex.h"

#include "swrast/swrast.h"

static void
intel_set_span_functions(struct intel_context *intel,
                         struct gl_renderbuffer *rb);

#define SPAN_CACHE_SIZE 4096

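/* Make sure the 4KB-aligned block of the buffer object containing "offset"
 * is resident in the renderbuffer's span cache, refilling the cache from
 * the region's bo whenever the requested offset falls outside the block
 * that is currently cached.
 */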
static void
get_span_cache(struct intel_renderbuffer *irb, uint32_t offset)
{
   if (irb->span_cache == NULL) {
      irb->span_cache = _mesa_malloc(SPAN_CACHE_SIZE);
      irb->span_cache_offset = -1;
   }

   if ((offset & ~(SPAN_CACHE_SIZE - 1)) != irb->span_cache_offset) {
      irb->span_cache_offset = offset & ~(SPAN_CACHE_SIZE - 1);
      dri_bo_get_subdata(irb->region->buffer, irb->span_cache_offset,
                         SPAN_CACHE_SIZE, irb->span_cache);
   }
}

static void
clear_span_cache(struct intel_renderbuffer *irb)
{
   irb->span_cache_offset = -1;
}

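/* Read a single value of the given width from the renderbuffer, going
 * through the span cache rather than mapping the buffer object.
 * pread_xrgb8888() additionally forces the undefined alpha byte to 0xff.
 */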
static uint32_t
pread_32(struct intel_renderbuffer *irb, uint32_t offset)
{
   get_span_cache(irb, offset);

   return *(uint32_t *)(irb->span_cache + (offset & (SPAN_CACHE_SIZE - 1)));
}

static uint32_t
pread_xrgb8888(struct intel_renderbuffer *irb, uint32_t offset)
{
   get_span_cache(irb, offset);

   return *(uint32_t *)(irb->span_cache + (offset & (SPAN_CACHE_SIZE - 1))) |
      0xff000000;
}

static uint16_t
pread_16(struct intel_renderbuffer *irb, uint32_t offset)
{
   get_span_cache(irb, offset);

   return *(uint16_t *)(irb->span_cache + (offset & (SPAN_CACHE_SIZE - 1)));
}

static uint8_t
pread_8(struct intel_renderbuffer *irb, uint32_t offset)
{
   get_span_cache(irb, offset);

   return *(uint8_t *)(irb->span_cache + (offset & (SPAN_CACHE_SIZE - 1)));
}

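/* Write a single value back to the buffer object and invalidate the span
 * cache.  pwrite_xrgb8888() only writes the low three bytes so the
 * undefined X channel is left untouched.
 */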
static void
pwrite_32(struct intel_renderbuffer *irb, uint32_t offset, uint32_t val)
{
   clear_span_cache(irb);

   dri_bo_subdata(irb->region->buffer, offset, 4, &val);
}

static void
pwrite_xrgb8888(struct intel_renderbuffer *irb, uint32_t offset, uint32_t val)
{
   clear_span_cache(irb);

   dri_bo_subdata(irb->region->buffer, offset, 3, &val);
}

static void
pwrite_16(struct intel_renderbuffer *irb, uint32_t offset, uint16_t val)
{
   clear_span_cache(irb);

   dri_bo_subdata(irb->region->buffer, offset, 2, &val);
}

static void
pwrite_8(struct intel_renderbuffer *irb, uint32_t offset, uint8_t val)
{
   clear_span_cache(irb);

   dri_bo_subdata(irb->region->buffer, offset, 1, &val);
}

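/* Convert between the packed z24s8 layout handed to us by core Mesa
 * (depth in the high 24 bits, stencil in the low 8) and the hardware's
 * s8z24 layout (stencil in the high 8 bits, depth in the low 24) by
 * rotating the 32-bit value by 8 bits.
 */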
static uint32_t
z24s8_to_s8z24(uint32_t val)
{
   return (val << 24) | (val >> 8);
}

static uint32_t
s8z24_to_z24s8(uint32_t val)
{
   return (val >> 24) | (val << 8);
}

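/* Linear (untiled) addressing: a simple row-major byte offset. */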
static uint32_t no_tile_swizzle(struct intel_renderbuffer *irb,
                                int x, int y)
{
   return (y * irb->region->pitch + x) * irb->region->cpp;
}

/*
 * Deal with tiled surfaces
 */

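/* X tiling lays the surface out as 4KB tiles that are 512 bytes wide by
 * 8 rows tall, with a tile's rows stored contiguously.  The bit-6
 * swizzle cases below replicate the channel-interleave address swizzling
 * that the memory controller applies, so the offsets computed here match
 * the bytes the GPU actually uses (see intel_set_span_functions).
 */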
static uint32_t x_tile_swizzle(struct intel_renderbuffer *irb,
                               int x, int y)
{
   int tile_stride;
   int xbyte;
   int x_tile_off, y_tile_off;
   int x_tile_number, y_tile_number;
   int tile_off, tile_base;

   tile_stride = (irb->region->pitch * irb->region->cpp) << 3;

   xbyte = x * irb->region->cpp;

   x_tile_off = xbyte & 0x1ff;
   y_tile_off = y & 7;

   x_tile_number = xbyte >> 9;
   y_tile_number = y >> 3;

   tile_off = (y_tile_off << 9) + x_tile_off;

   switch (irb->region->bit_6_swizzle) {
   case I915_BIT_6_SWIZZLE_NONE:
      break;
   case I915_BIT_6_SWIZZLE_9:
      tile_off ^= ((tile_off >> 3) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_10:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 4) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_11:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 5) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_10_11:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 4) & 64) ^
         ((tile_off >> 5) & 64);
      break;
   default:
      fprintf(stderr, "Unknown tile swizzling mode %d\n",
              irb->region->bit_6_swizzle);
      exit(1);
   }

   tile_base = (x_tile_number << 12) + y_tile_number * tile_stride;

#if 0
   printf("(%d,%d) -> %d + %d = %d (pitch = %d, tstride = %d)\n",
          x, y, tile_off, tile_base,
          tile_off + tile_base,
          irb->region->pitch, tile_stride);
#endif

   return tile_base + tile_off;
}

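/* Y tiling uses 4KB tiles that are 128 bytes wide by 32 rows tall, built
 * from 16-byte OWord columns, which is why the X byte offset is split
 * into its upper bits and low nibble below.  Bit-6 swizzling is handled
 * the same way as for X tiling.
 */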
static uint32_t y_tile_swizzle(struct intel_renderbuffer *irb,
                               int x, int y)
{
   int tile_stride;
   int xbyte;
   int x_tile_off, y_tile_off;
   int x_tile_number, y_tile_number;
   int tile_off, tile_base;

   tile_stride = (irb->region->pitch * irb->region->cpp) << 5;

   xbyte = x * irb->region->cpp;

   x_tile_off = xbyte & 0x7f;
   y_tile_off = y & 0x1f;

   x_tile_number = xbyte >> 7;
   y_tile_number = y >> 5;

   tile_off = ((x_tile_off & ~0xf) << 5) + (y_tile_off << 4) +
      (x_tile_off & 0xf);

   switch (irb->region->bit_6_swizzle) {
   case I915_BIT_6_SWIZZLE_NONE:
      break;
   case I915_BIT_6_SWIZZLE_9:
      tile_off ^= ((tile_off >> 3) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_10:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 4) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_11:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 5) & 64);
      break;
   case I915_BIT_6_SWIZZLE_9_10_11:
      tile_off ^= ((tile_off >> 3) & 64) ^ ((tile_off >> 4) & 64) ^
         ((tile_off >> 5) & 64);
      break;
   default:
      fprintf(stderr, "Unknown tile swizzling mode %d\n",
              irb->region->bit_6_swizzle);
      exit(1);
   }

   tile_base = (x_tile_number << 12) + y_tile_number * tile_stride;

   return tile_base + tile_off;
}

/*
   break intelWriteRGBASpan_ARGB8888
*/

#undef DBG
#define DBG 0

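/* LOCAL_VARS, HW_CLIPLOOP, Y_FLIP and friends below provide the hooks the
 * span templates (intel_spantmp.h / spantmp2.h / stenciltmp.h) expect
 * when they are included further down.
 */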
#define LOCAL_VARS \
   struct intel_context *intel = intel_context(ctx); \
   struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
   const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \
   const GLint yBias = ctx->DrawBuffer->Name ? 0 : irb->Base.Height - 1; \
   unsigned int num_cliprects; \
   struct drm_clip_rect *cliprects; \
   int x_off, y_off; \
   GLuint p; \
   (void) p; \
   intel_get_cliprects(intel, &cliprects, &num_cliprects, &x_off, &y_off);

/* XXX FBO: this is identical to the macro in spantmp2.h except we get
 * the cliprect info from the context, not the driDrawable.
 * Move this into spantmp2.h someday.
 */
#define HW_CLIPLOOP() \
   do { \
      int _nc = num_cliprects; \
      while ( _nc-- ) { \
         int minx = cliprects[_nc].x1 - x_off; \
         int miny = cliprects[_nc].y1 - y_off; \
         int maxx = cliprects[_nc].x2 - x_off; \
         int maxy = cliprects[_nc].y2 - y_off;

#if 0
   }}
#endif

#define Y_FLIP(_y) ((_y) * yScale + yBias)

/* XXX with GEM, these need to tell the kernel */
#define HW_LOCK()

#define HW_UNLOCK()

/* Convenience macros to avoid typing the swizzle argument over and over */
#define NO_TILE(_X, _Y) no_tile_swizzle(irb, (_X) + x_off, (_Y) + y_off)
#define X_TILE(_X, _Y) x_tile_swizzle(irb, (_X) + x_off, (_Y) + y_off)
#define Y_TILE(_X, _Y) y_tile_swizzle(irb, (_X) + x_off, (_Y) + y_off)

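/* Each block below sets up the pixel format macros and then includes
 * intel_spantmp.h, which instantiates untiled, X-tiled and Y-tiled
 * GetRow/PutRow variants for that format, named via INTEL_TAG.
 */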
/* r5g6b5 color span and pixel functions */
#define INTEL_PIXEL_FMT GL_RGB
#define INTEL_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
#define INTEL_READ_VALUE(offset) pread_16(irb, offset)
#define INTEL_WRITE_VALUE(offset, v) pwrite_16(irb, offset, v)
#define INTEL_TAG(x) x##_RGB565
#include "intel_spantmp.h"

/* a8r8g8b8 color span and pixel functions */
#define INTEL_PIXEL_FMT GL_BGRA
#define INTEL_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
#define INTEL_READ_VALUE(offset) pread_32(irb, offset)
#define INTEL_WRITE_VALUE(offset, v) pwrite_32(irb, offset, v)
#define INTEL_TAG(x) x##_ARGB8888
#include "intel_spantmp.h"

/* x8r8g8b8 color span and pixel functions */
#define INTEL_PIXEL_FMT GL_BGRA
#define INTEL_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
#define INTEL_READ_VALUE(offset) pread_xrgb8888(irb, offset)
#define INTEL_WRITE_VALUE(offset, v) pwrite_xrgb8888(irb, offset, v)
#define INTEL_TAG(x) x##_xRGB8888
#include "intel_spantmp.h"

#define LOCAL_DEPTH_VARS \
   struct intel_context *intel = intel_context(ctx); \
   struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
   const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \
   const GLint yBias = ctx->DrawBuffer->Name ? 0 : irb->Base.Height - 1; \
   unsigned int num_cliprects; \
   struct drm_clip_rect *cliprects; \
   int x_off, y_off; \
   intel_get_cliprects(intel, &cliprects, &num_cliprects, &x_off, &y_off);


#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS

/* z16 depthbuffer functions. */
#define INTEL_VALUE_TYPE GLushort
#define INTEL_WRITE_DEPTH(offset, d) pwrite_16(irb, offset, d)
#define INTEL_READ_DEPTH(offset) pread_16(irb, offset)
#define INTEL_TAG(name) name##_z16
#include "intel_depthtmp.h"

/* z24 depthbuffer functions. */
#define INTEL_VALUE_TYPE GLuint
#define INTEL_WRITE_DEPTH(offset, d) pwrite_32(irb, offset, d)
#define INTEL_READ_DEPTH(offset) pread_32(irb, offset)
#define INTEL_TAG(name) name##_z24
#include "intel_depthtmp.h"

/* z24s8 depthbuffer functions. */
#define INTEL_VALUE_TYPE GLuint
#define INTEL_WRITE_DEPTH(offset, d) pwrite_32(irb, offset, z24s8_to_s8z24(d))
#define INTEL_READ_DEPTH(offset) s8z24_to_z24s8(pread_32(irb, offset))
#define INTEL_TAG(name) name##_z24_s8
#include "intel_depthtmp.h"


/**
 ** 8-bit stencil function (XXX FBO: This is obsolete)
 **/
#define WRITE_STENCIL(_x, _y, d) pwrite_8(irb, NO_TILE(_x, _y) + 3, d)
#define READ_STENCIL(d, _x, _y) d = pread_8(irb, NO_TILE(_x, _y) + 3);
#define TAG(x) intel##x##_z24_s8
#include "stenciltmp.h"

/**
 ** 8-bit x-tile stencil function (XXX FBO: This is obsolete)
 **/
#define WRITE_STENCIL(_x, _y, d) pwrite_8(irb, X_TILE(_x, _y) + 3, d)
#define READ_STENCIL(d, _x, _y) d = pread_8(irb, X_TILE(_x, _y) + 3);
#define TAG(x) intel_XTile_##x##_z24_s8
#include "stenciltmp.h"

/**
 ** 8-bit y-tile stencil function (XXX FBO: This is obsolete)
 **/
#define WRITE_STENCIL(_x, _y, d) pwrite_8(irb, Y_TILE(_x, _y) + 3, d)
#define READ_STENCIL(d, _x, _y) d = pread_8(irb, Y_TILE(_x, _y) + 3)
#define TAG(x) intel_YTile_##x##_z24_s8
#include "stenciltmp.h"

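/**
 * Called when swrast is about to render to this renderbuffer: install the
 * span read/write functions that match its format and tiling.
 */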
void
intel_renderbuffer_map(struct intel_context *intel, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   if (irb == NULL || irb->region == NULL)
      return;

   intel_set_span_functions(intel, rb);
}

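/**
 * Called when swrast is done with this renderbuffer: drop the span cache
 * and remove the span functions.
 */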
void
intel_renderbuffer_unmap(struct intel_context *intel,
                         struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   if (irb == NULL || irb->region == NULL)
      return;

   clear_span_cache(irb);

   rb->GetRow = NULL;
   rb->PutRow = NULL;
}

/**
 * Map or unmap all the renderbuffers which we may need during
 * software rendering.
 * XXX in the future, we could probably convey extra information to
 * reduce the number of mappings needed.  I.e. if doing a glReadPixels
 * from the depth buffer, we really only need one mapping.
 *
 * XXX Rewrite this function someday.
 * We can probably just loop over all the renderbuffer attachments,
 * map/unmap all of them, and not worry about the _ColorDrawBuffers,
 * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
 */
static void
intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
{
   GLcontext *ctx = &intel->ctx;
   GLuint i, j;

   /* color draw buffers */
   for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers; j++) {
      if (map)
         intel_renderbuffer_map(intel, ctx->DrawBuffer->_ColorDrawBuffers[j]);
      else
         intel_renderbuffer_unmap(intel, ctx->DrawBuffer->_ColorDrawBuffers[j]);
   }

   /* check for render to textures */
   for (i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer_attachment *att =
         ctx->DrawBuffer->Attachment + i;
      struct gl_texture_object *tex = att->Texture;
      if (tex) {
         /* render to texture */
         ASSERT(att->Renderbuffer);
         if (map)
            intel_tex_map_images(intel, intel_texture_object(tex));
         else
            intel_tex_unmap_images(intel, intel_texture_object(tex));
      }
   }

   /* color read buffers */
   if (map)
      intel_renderbuffer_map(intel, ctx->ReadBuffer->_ColorReadBuffer);
   else
      intel_renderbuffer_unmap(intel, ctx->ReadBuffer->_ColorReadBuffer);

   /* depth buffer (Note wrapper!) */
   if (ctx->DrawBuffer->_DepthBuffer) {
      if (map)
         intel_renderbuffer_map(intel, ctx->DrawBuffer->_DepthBuffer->Wrapped);
      else
         intel_renderbuffer_unmap(intel,
                                  ctx->DrawBuffer->_DepthBuffer->Wrapped);
   }

   /* stencil buffer (Note wrapper!) */
   if (ctx->DrawBuffer->_StencilBuffer) {
      if (map)
         intel_renderbuffer_map(intel,
                                ctx->DrawBuffer->_StencilBuffer->Wrapped);
      else
         intel_renderbuffer_unmap(intel,
                                  ctx->DrawBuffer->_StencilBuffer->Wrapped);
   }
}


/**
 * Prepare for software rendering.  Map the current read/draw framebuffers'
 * renderbuffers and all currently bound texture objects.
 *
 * Old note: Moved locking out to get reasonable span performance.
 */
void
intelSpanRenderStart(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);

   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_map_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_buffers(intel, GL_TRUE);
}

/**
 * Called when done with software rendering.  Unmap the buffers we mapped
 * in the above function.
 */
void
intelSpanRenderFinish(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   _swrast_flush(ctx);

   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_unmap_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_buffers(intel, GL_FALSE);

   UNLOCK_HARDWARE(intel);
}


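/**
 * Hook the span-render start/finish callbacks into swrast.
 */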
void
intelInitSpanFuncs(GLcontext * ctx)
{
   struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
   swdd->SpanRenderStart = intelSpanRenderStart;
   swdd->SpanRenderFinish = intelSpanRenderFinish;
}


/**
 * Plug in appropriate span read/write functions for the given renderbuffer.
 * These are used for the software fallbacks.
 */
static void
intel_set_span_functions(struct intel_context *intel,
                         struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
   uint32_t tiling;

   /* If in GEM mode, we need to do the tile address swizzling ourselves,
    * instead of the fence registers handling it.
    */
   if (intel->ttm)
      tiling = irb->region->tiling;
   else
      tiling = I915_TILING_NONE;

   if (rb->_ActualFormat == GL_RGB5) {
      /* 565 RGB */
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitPointers_RGB565(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitPointers_RGB565(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitPointers_RGB565(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_RGB8) {
      /* 8888 RGBx */
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitPointers_xRGB8888(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitPointers_xRGB8888(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitPointers_xRGB8888(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_RGBA8) {
      /* 8888 RGBA */
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitPointers_ARGB8888(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitPointers_ARGB8888(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitPointers_ARGB8888(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitDepthPointers_z16(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitDepthPointers_z16(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitDepthPointers_z16(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24) {
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitDepthPointers_z24(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitDepthPointers_z24(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitDepthPointers_z24(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitDepthPointers_z24_s8(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitDepthPointers_z24_s8(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitDepthPointers_z24_s8(rb);
         break;
      }
   }
   else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) {
      switch (tiling) {
      case I915_TILING_NONE:
      default:
         intelInitStencilPointers_z24_s8(rb);
         break;
      case I915_TILING_X:
         intel_XTile_InitStencilPointers_z24_s8(rb);
         break;
      case I915_TILING_Y:
         intel_YTile_InitStencilPointers_z24_s8(rb);
         break;
      }
   }
   else {
      _mesa_problem(NULL,
                    "Unexpected _ActualFormat in intel_set_span_functions");
   }
}