1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
5 VA Linux Systems Inc., Fremont, California.
7 The Weather Channel (TM) funded Tungsten Graphics to develop the
8 initial release of the Radeon 8500 driver under the XFree86 license.
9 This notice must be preserved.
13 Permission is hereby granted, free of charge, to any person obtaining
14 a copy of this software and associated documentation files (the
15 "Software"), to deal in the Software without restriction, including
16 without limitation the rights to use, copy, modify, merge, publish,
17 distribute, sublicense, and/or sell copies of the Software, and to
18 permit persons to whom the Software is furnished to do so, subject to
19 the following conditions:
21 The above copyright notice and this permission notice (including the
22 next paragraph) shall be included in all copies or substantial
23 portions of the Software.
25 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
28 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
29 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
30 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
31 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 **************************************************************************/
37 * Kevin E. Martin <martin@valinux.com>
38 * Gareth Hughes <gareth@valinux.com>
39 * Keith Whitwell <keith@tungstengraphics.com>
43 #include "main/glheader.h"
44 #include "swrast/swrast.h"
46 #include "radeon_common.h"
47 #include "radeon_lock.h"
48 #include "radeon_span.h"
/* Forward declaration: installs the Get/Put span routines for a
 * renderbuffer based on its _ActualFormat (defined at the bottom of
 * this file). */
static void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb);
54 static GLubyte
*radeon_ptr32(const struct radeon_renderbuffer
* rrb
,
57 GLubyte
*ptr
= rrb
->bo
->ptr
;
58 uint32_t mask
= RADEON_BO_FLAGS_MACRO_TILE
| RADEON_BO_FLAGS_MICRO_TILE
;
63 if (rrb
->has_surface
|| !(rrb
->bo
->flags
& mask
)) {
64 offset
= x
* rrb
->cpp
+ y
* rrb
->pitch
;
67 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MACRO_TILE
) {
68 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MICRO_TILE
) {
69 nmacroblkpl
= rrb
->pitch
>> 5;
70 offset
+= ((y
>> 4) * nmacroblkpl
) << 11;
71 offset
+= ((y
& 15) >> 1) << 8;
72 offset
+= (y
& 1) << 4;
73 offset
+= (x
>> 5) << 11;
74 offset
+= ((x
& 31) >> 2) << 5;
75 offset
+= (x
& 3) << 2;
77 nmacroblkpl
= rrb
->pitch
>> 6;
78 offset
+= ((y
>> 3) * nmacroblkpl
) << 11;
79 offset
+= (y
& 7) << 8;
80 offset
+= (x
>> 6) << 11;
81 offset
+= ((x
& 63) >> 3) << 5;
82 offset
+= (x
& 7) << 2;
85 nmicroblkpl
= ((rrb
->pitch
+ 31) & ~31) >> 5;
86 offset
+= (y
* nmicroblkpl
) << 5;
87 offset
+= (x
>> 3) << 5;
88 offset
+= (x
& 7) << 2;
94 static GLubyte
*radeon_ptr16(const struct radeon_renderbuffer
* rrb
,
97 GLubyte
*ptr
= rrb
->bo
->ptr
;
98 uint32_t mask
= RADEON_BO_FLAGS_MACRO_TILE
| RADEON_BO_FLAGS_MICRO_TILE
;
103 if (rrb
->has_surface
|| !(rrb
->bo
->flags
& mask
)) {
104 offset
= x
* rrb
->cpp
+ y
* rrb
->pitch
;
107 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MACRO_TILE
) {
108 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MICRO_TILE
) {
109 nmacroblkpl
= rrb
->pitch
>> 6;
110 offset
+= ((y
>> 4) * nmacroblkpl
) << 11;
111 offset
+= ((y
& 15) >> 1) << 8;
112 offset
+= (y
& 1) << 4;
113 offset
+= (x
>> 6) << 11;
114 offset
+= ((x
& 63) >> 3) << 5;
115 offset
+= (x
& 7) << 1;
117 nmacroblkpl
= rrb
->pitch
>> 7;
118 offset
+= ((y
>> 3) * nmacroblkpl
) << 11;
119 offset
+= (y
& 7) << 8;
120 offset
+= (x
>> 7) << 11;
121 offset
+= ((x
& 127) >> 4) << 5;
122 offset
+= (x
& 15) << 2;
125 nmicroblkpl
= ((rrb
->pitch
+ 31) & ~31) >> 5;
126 offset
+= (y
* nmicroblkpl
) << 5;
127 offset
+= (x
>> 4) << 5;
128 offset
+= (x
& 15) << 2;
134 static GLubyte
*radeon_ptr(const struct radeon_renderbuffer
* rrb
,
137 GLubyte
*ptr
= rrb
->bo
->ptr
;
138 uint32_t mask
= RADEON_BO_FLAGS_MACRO_TILE
| RADEON_BO_FLAGS_MICRO_TILE
;
145 if (rrb
->has_surface
|| !(rrb
->bo
->flags
& mask
)) {
146 offset
= x
* rrb
->cpp
+ y
* rrb
->pitch
;
149 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MACRO_TILE
) {
150 if (rrb
->bo
->flags
& RADEON_BO_FLAGS_MICRO_TILE
) {
151 microblkxs
= 16 / rrb
->cpp
;
152 macroblkxs
= 128 / rrb
->cpp
;
153 nmacroblkpl
= rrb
->pitch
/ macroblkxs
;
154 offset
+= ((y
>> 4) * nmacroblkpl
) << 11;
155 offset
+= ((y
& 15) >> 1) << 8;
156 offset
+= (y
& 1) << 4;
157 offset
+= (x
/ macroblkxs
) << 11;
158 offset
+= ((x
& (macroblkxs
- 1)) / microblkxs
) << 5;
159 offset
+= (x
& (microblkxs
- 1)) * rrb
->cpp
;
161 microblkxs
= 32 / rrb
->cpp
;
162 macroblkxs
= 256 / rrb
->cpp
;
163 nmacroblkpl
= rrb
->pitch
/ macroblkxs
;
164 offset
+= ((y
>> 3) * nmacroblkpl
) << 11;
165 offset
+= (y
& 7) << 8;
166 offset
+= (x
/ macroblkxs
) << 11;
167 offset
+= ((x
& (macroblkxs
- 1)) / microblkxs
) << 5;
168 offset
+= (x
& (microblkxs
- 1)) * rrb
->cpp
;
171 microblkxs
= 32 / rrb
->cpp
;
172 nmicroblkpl
= ((rrb
->pitch
+ 31) & ~31) >> 5;
173 offset
+= (y
* nmicroblkpl
) << 5;
174 offset
+= (x
/ microblkxs
) << 5;
175 offset
+= (x
& (microblkxs
- 1)) * rrb
->cpp
;
183  * Note that all information needed to access pixels in a renderbuffer
184  * should be obtained through the gl_renderbuffer parameter, not per-context
/* NOTE(review): the `#define LOCAL_VARS` header line and the
 * `int x_off, y_off;` declaration lines appear to be missing from this
 * extraction (the body below references x_off/y_off) — verify against
 * upstream radeon_span.c before relying on this macro. */
188 struct radeon_context *radeon = RADEON_CONTEXT(ctx); \
189 struct radeon_renderbuffer *rrb = (void *) rb; \
190 const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \
191 const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\
192 unsigned int num_cliprects; \
193 struct drm_clip_rect *cliprects; \
197 radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off);
/* Depth variant: same locals as LOCAL_VARS (used by depthtmp.h).
 * NOTE(review): x_off/y_off declaration lines also missing here. */
199 #define LOCAL_DEPTH_VARS \
200 struct radeon_context *radeon = RADEON_CONTEXT(ctx); \
201 struct radeon_renderbuffer *rrb = (void *) rb; \
202 const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \
203 const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\
204 unsigned int num_cliprects; \
205 struct drm_clip_rect *cliprects; \
207 radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off);
/* Stencil spans use the same locals as depth spans. */
209 #define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
/* Flip y for window-system buffers (Name == 0): yScale/yBias computed above. */
211 #define Y_FLIP(_y) ((_y) * yScale + yBias)
217 /* XXX FBO: this is identical to the macro in spantmp2.h except we get
218  * the cliprect info from the context, not the driDrawable.
219  * Move this into spantmp2.h someday.
/* NOTE(review): the loop header line(s) between the `_nc` counter and
 * the per-cliprect bounds (and the matching HW_ENDCLIPLOOP) are missing
 * from this extraction — verify against upstream radeon_span.c. */
221 #define HW_CLIPLOOP() \
223 int _nc = num_cliprects; \
225 int minx = cliprects[_nc].x1 - x_off; \
226 int miny = cliprects[_nc].y1 - y_off; \
227 int maxx = cliprects[_nc].x2 - x_off; \
228 int maxy = cliprects[_nc].y2 - y_off;
230 /* ================================================================
/* Instantiate RGB565 (16-bit) color span/pixel functions via the
 * spantmp2.h template; TAG names produce radeon*_RGB565 symbols and
 * GET_PTR routes pixel addressing through radeon_ptr16(). */
234 /* 16 bit, RGB565 color spanline and pixel functions
236 #define SPANTMP_PIXEL_FMT GL_RGB
237 #define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
239 #define TAG(x) radeon##x##_RGB565
240 #define TAG2(x,y) radeon##x##_RGB565##y
241 #define GET_PTR(X,Y) radeon_ptr16(rrb, (X), (Y))
242 #include "spantmp2.h"
/* Instantiate ARGB8888 (32-bit) color span/pixel functions; GET_PTR
 * routes through radeon_ptr32(). NOTE(review): the usual #undef lines
 * between the two instantiations are not visible in this extraction. */
244 /* 32 bit, ARGB8888 color spanline and pixel functions
246 #define SPANTMP_PIXEL_FMT GL_BGRA
247 #define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
249 #define TAG(x) radeon##x##_ARGB8888
250 #define TAG2(x,y) radeon##x##_ARGB8888##y
251 #define GET_PTR(X,Y) radeon_ptr32(rrb, (X), (Y))
252 #include "spantmp2.h"
254 /* ================================================================
258 /* The Radeon family has depth tiling on all the time, so we have to convert
259  * the x,y coordinates into the memory bus address (mba) in the same
260  * manner as the engine. In each case, the linear block address (ba)
261  * is calculated, and then wired with x and y to produce the final
263  * The chip will do address translation on its own if the surface registers
264  * are set up correctly. It is not quite enough to get it working with hyperz
/* z16 depth span instantiation: reads/writes GLushort through the
 * generic tiled pointer helper radeon_ptr(). */
268 /* 16-bit depth buffer functions
270 #define VALUE_TYPE GLushort
272 #define WRITE_DEPTH( _x, _y, d ) \
273 *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off) = d
275 #define READ_DEPTH( d, _x, _y ) \
276 d = *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off)
278 #define TAG(x) radeon##x##_z16
279 #include "depthtmp.h"
281 /* 24 bit depth, 8 bit stencil depthbuffer functions
283  * Careful: It looks like the R300 uses ZZZS byte order while the R200
284  * uses SZZZ for 24 bit depth, 8 bit stencil mode.
286 #define VALUE_TYPE GLuint
/* First WRITE_DEPTH variant: depth in bits 31..8 (ZZZS / stencil-low
 * layout). NOTE(review): the surrounding #if/#else chip-family
 * conditionals and the do{...}while(0) framing lines are missing from
 * this extraction — verify against upstream radeon_span.c. */
289 #define WRITE_DEPTH( _x, _y, d ) \
291 GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
292 GLuint tmp = *_ptr; \
294 tmp |= ((d << 8) & 0xffffff00); \
/* Second WRITE_DEPTH variant: depth in bits 23..0 (SZZZ / stencil-high
 * layout); the alternate branch of the missing #if/#else. */
298 #define WRITE_DEPTH( _x, _y, d ) \
300 GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \_
301 GLuint tmp
= *_ptr
; \
303 tmp
|= ((d
) & 0x00ffffff); \
/* READ_DEPTH, stencil-low layout: extract depth from bits 31..8. */
309 #define READ_DEPTH( d, _x, _y ) \
311 d = (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) & 0xffffff00) >> 8; \
/* READ_DEPTH, stencil-high layout: depth is the low 24 bits. */
314 #define READ_DEPTH( d, _x, _y ) \
315 d = *(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off )) & 0x00ffffff;
318 fprintf(stderr, "dval(%d, %d, %d, %d)=0x%08X\n", _x, xo, _y, yo, d);\
319 d = *(GLuint*)(radeon_ptr(rrb, _x, _y )) & 0x00ffffff;
/* Instantiate the z24_s8 depth span functions from the template. */
321 #define TAG(x) radeon##x##_z24_s8
322 #include "depthtmp.h"
324 /* ================================================================
328 /* 24 bit depth, 8 bit stencil depthbuffer functions
/* First WRITE_STENCIL variant (stencil in the low byte, per the tmp
 * masking in the matching READ below). NOTE(review): the surrounding
 * #if/#else chip-family conditionals and the remaining macro lines
 * (mask, store, do/while framing) are missing from this extraction —
 * verify against upstream radeon_span.c. */
331 #define WRITE_STENCIL( _x, _y, d ) \
333 GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off); \
334 GLuint tmp = *_ptr; \
/* Second WRITE_STENCIL variant: stencil stored in bits 31..24. */
340 #define WRITE_STENCIL( _x, _y, d ) \
342 GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off); \
343 GLuint tmp = *_ptr; \
345 tmp |= (((d) & 0xff) << 24); \
/* READ_STENCIL, stencil-low layout: low byte of the packed word. */
351 #define READ_STENCIL( d, _x, _y ) \
353 GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
354 GLuint tmp = *_ptr; \
355 d = tmp & 0x000000ff; \
/* READ_STENCIL, stencil-high layout: bits 31..24 of the packed word. */
358 #define READ_STENCIL( d, _x, _y ) \
360 GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
361 GLuint tmp = *_ptr; \
362 d = (tmp & 0xff000000) >> 24; \
/* Instantiate the z24_s8 stencil span functions from the template. */
366 #define TAG(x) radeon##x##_z24_s8
367 #include "stenciltmp.h"
370 static void map_buffer(struct gl_renderbuffer
*rb
, GLboolean write
)
372 struct radeon_renderbuffer
*rrb
= (void*)rb
;
376 r
= radeon_bo_map(rrb
->bo
, write
);
378 fprintf(stderr
, "(%s) error(%d) mapping buffer.\n",
383 radeonSetSpanFunctions(rrb
);
386 static void unmap_buffer(struct gl_renderbuffer
*rb
)
388 struct radeon_renderbuffer
*rrb
= (void*)rb
;
391 radeon_bo_unmap(rrb
->bo
);
397 static void radeonSpanRenderStart(GLcontext
* ctx
)
399 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
402 radeon_firevertices(rmesa
);
404 for (i
= 0; i
< ctx
->Const
.MaxTextureImageUnits
; i
++) {
405 if (ctx
->Texture
.Unit
[i
]._ReallyEnabled
)
406 ctx
->Driver
.MapTexture(ctx
, ctx
->Texture
.Unit
[i
]._Current
);
409 /* color draw buffers */
410 for (i
= 0; i
< ctx
->DrawBuffer
->_NumColorDrawBuffers
; i
++) {
411 map_buffer(ctx
->DrawBuffer
->_ColorDrawBuffers
[i
], GL_TRUE
);
414 map_buffer(ctx
->ReadBuffer
->_ColorReadBuffer
, GL_FALSE
);
416 if (ctx
->DrawBuffer
->_DepthBuffer
) {
417 map_buffer(ctx
->DrawBuffer
->_DepthBuffer
->Wrapped
, GL_TRUE
);
419 if (ctx
->DrawBuffer
->_StencilBuffer
)
420 map_buffer(ctx
->DrawBuffer
->_StencilBuffer
->Wrapped
, GL_TRUE
);
422 /* The locking and wait for idle should really only be needed in classic mode.
423 * In a future memory manager based implementation, this should become
424 * unnecessary due to the fact that mapping our buffers, textures, etc.
425 * should implicitly wait for any previous rendering commands that must
427 LOCK_HARDWARE(rmesa
);
428 radeonWaitForIdleLocked(rmesa
);
431 static void radeonSpanRenderFinish(GLcontext
* ctx
)
433 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
436 UNLOCK_HARDWARE(rmesa
);
438 for (i
= 0; i
< ctx
->Const
.MaxTextureImageUnits
; i
++) {
439 if (ctx
->Texture
.Unit
[i
]._ReallyEnabled
)
440 ctx
->Driver
.UnmapTexture(ctx
, ctx
->Texture
.Unit
[i
]._Current
);
443 /* color draw buffers */
444 for (i
= 0; i
< ctx
->DrawBuffer
->_NumColorDrawBuffers
; i
++)
445 unmap_buffer(ctx
->DrawBuffer
->_ColorDrawBuffers
[i
]);
447 unmap_buffer(ctx
->ReadBuffer
->_ColorReadBuffer
);
449 if (ctx
->DrawBuffer
->_DepthBuffer
)
450 unmap_buffer(ctx
->DrawBuffer
->_DepthBuffer
->Wrapped
);
451 if (ctx
->DrawBuffer
->_StencilBuffer
)
452 unmap_buffer(ctx
->DrawBuffer
->_StencilBuffer
->Wrapped
);
455 void radeonInitSpanFuncs(GLcontext
* ctx
)
457 struct swrast_device_driver
*swdd
=
458 _swrast_GetDeviceDriverReference(ctx
);
459 swdd
->SpanRenderStart
= radeonSpanRenderStart
;
460 swdd
->SpanRenderFinish
= radeonSpanRenderFinish
;
464 * Plug in the Get/Put routines for the given driRenderbuffer.
466 static void radeonSetSpanFunctions(struct radeon_renderbuffer
*rrb
)
468 if (rrb
->base
._ActualFormat
== GL_RGB5
) {
469 radeonInitPointers_RGB565(&rrb
->base
);
470 } else if (rrb
->base
._ActualFormat
== GL_RGB8
) {
471 radeonInitPointers_ARGB8888(&rrb
->base
);
472 } else if (rrb
->base
._ActualFormat
== GL_RGBA8
) {
473 radeonInitPointers_ARGB8888(&rrb
->base
);
474 } else if (rrb
->base
._ActualFormat
== GL_DEPTH_COMPONENT16
) {
475 radeonInitDepthPointers_z16(&rrb
->base
);
476 } else if (rrb
->base
._ActualFormat
== GL_DEPTH_COMPONENT24
) {
477 radeonInitDepthPointers_z24_s8(&rrb
->base
);
478 } else if (rrb
->base
._ActualFormat
== GL_STENCIL_INDEX8_EXT
) {
479 radeonInitStencilPointers_z24_s8(&rrb
->base
);