1 /**************************************************************************
3 Copyright (C) 2004 Nicolai Haehnle.
4 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
6 The Weather Channel (TM) funded Tungsten Graphics to develop the
7 initial release of the Radeon 8500 driver under the XFree86 license.
8 This notice must be preserved.
12 Permission is hereby granted, free of charge, to any person obtaining a
13 copy of this software and associated documentation files (the "Software"),
14 to deal in the Software without restriction, including without limitation
15 on the rights to use, copy, modify, merge, publish, distribute, sub
16 license, and/or sell copies of the Software, and to permit persons to whom
17 the Software is furnished to do so, subject to the following conditions:
19 The above copyright notice and this permission notice (including the next
20 paragraph) shall be included in all copies or substantial portions of the
23 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26 ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
27 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
28 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29 USE OR OTHER DEALINGS IN THE SOFTWARE.
31 **************************************************************************/
33 #include "radeon_common.h"
#if defined(USE_X86_ASM)
/* Copy `nr` 32-bit words from src to dst with "rep movsl"; advances dst
 * past the copied region as a side effect (callers rely on this).
 * NOTE(review): the #else/#endif and do{}while(0) wrappers were missing in
 * the mangled paste and have been restored.
 */
#define COPY_DWORDS( dst, src, nr )					\
do {									\
	int __tmp;							\
	__asm__ __volatile__( "rep ; movsl"				\
			      : "=%c" (__tmp), "=D" (dst), "=S" (__tmp)	\
			      : "0" (nr),				\
			        "D" ((long)dst),			\
			        "S" ((long)src) );			\
} while (0)
#else
/* Portable fallback: plain dword-by-dword copy, then advance dst so both
 * variants leave dst pointing one past the copied data. */
#define COPY_DWORDS( dst, src, nr )		\
do {						\
	int j;					\
	for ( j = 0 ; j < nr ; j++ )		\
		dst[j] = ((int *)src)[j];	\
	dst += nr;				\
} while (0)
#endif
55 static void radeonEmitVec4(uint32_t *out
, GLvoid
* data
, int stride
, int count
)
59 if (RADEON_DEBUG
& DEBUG_VERTS
)
60 fprintf(stderr
, "%s count %d stride %d out %p data %p\n",
61 __FUNCTION__
, count
, stride
, (void *)out
, (void *)data
);
64 COPY_DWORDS(out
, data
, count
);
66 for (i
= 0; i
< count
; i
++) {
67 out
[0] = *(int *)data
;
73 void radeonEmitVec8(uint32_t *out
, GLvoid
* data
, int stride
, int count
)
77 if (RADEON_DEBUG
& DEBUG_VERTS
)
78 fprintf(stderr
, "%s count %d stride %d out %p data %p\n",
79 __FUNCTION__
, count
, stride
, (void *)out
, (void *)data
);
82 COPY_DWORDS(out
, data
, count
* 2);
84 for (i
= 0; i
< count
; i
++) {
85 out
[0] = *(int *)data
;
86 out
[1] = *(int *)(data
+ 4);
92 void radeonEmitVec12(uint32_t *out
, GLvoid
* data
, int stride
, int count
)
96 if (RADEON_DEBUG
& DEBUG_VERTS
)
97 fprintf(stderr
, "%s count %d stride %d out %p data %p\n",
98 __FUNCTION__
, count
, stride
, (void *)out
, (void *)data
);
101 COPY_DWORDS(out
, data
, count
* 3);
104 for (i
= 0; i
< count
; i
++) {
105 out
[0] = *(int *)data
;
106 out
[1] = *(int *)(data
+ 4);
107 out
[2] = *(int *)(data
+ 8);
113 static void radeonEmitVec16(uint32_t *out
, GLvoid
* data
, int stride
, int count
)
117 if (RADEON_DEBUG
& DEBUG_VERTS
)
118 fprintf(stderr
, "%s count %d stride %d out %p data %p\n",
119 __FUNCTION__
, count
, stride
, (void *)out
, (void *)data
);
122 COPY_DWORDS(out
, data
, count
* 4);
124 for (i
= 0; i
< count
; i
++) {
125 out
[0] = *(int *)data
;
126 out
[1] = *(int *)(data
+ 4);
127 out
[2] = *(int *)(data
+ 8);
128 out
[3] = *(int *)(data
+ 12);
134 void rcommon_emit_vector(GLcontext
* ctx
, struct radeon_aos
*aos
,
135 GLvoid
* data
, int size
, int stride
, int count
)
137 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
141 radeonAllocDmaRegion(rmesa
, &aos
->bo
, &aos
->offset
, size
* 4, 32);
145 radeonAllocDmaRegion(rmesa
, &aos
->bo
, &aos
->offset
, size
* count
* 4, 32);
149 aos
->components
= size
;
152 out
= (uint32_t*)((char*)aos
->bo
->ptr
+ aos
->offset
);
154 case 1: radeonEmitVec4(out
, data
, stride
, count
); break;
155 case 2: radeonEmitVec8(out
, data
, stride
, count
); break;
156 case 3: radeonEmitVec12(out
, data
, stride
, count
); break;
157 case 4: radeonEmitVec16(out
, data
, stride
, count
); break;
164 void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa
, int size
)
167 size
= MAX2(size
, MAX_DMA_BUF_SZ
* 16);
169 if (RADEON_DEBUG
& (DEBUG_IOCTL
| DEBUG_DMA
))
170 fprintf(stderr
, "%s\n", __FUNCTION__
);
172 if (rmesa
->dma
.flush
) {
173 rmesa
->dma
.flush(rmesa
->glCtx
);
176 if (rmesa
->dma
.nr_released_bufs
> 4) {
177 rcommonFlushCmdBuf(rmesa
, __FUNCTION__
);
178 rmesa
->dma
.nr_released_bufs
= 0;
181 if (rmesa
->dma
.current
) {
182 radeon_bo_unmap(rmesa
->dma
.current
);
183 radeon_bo_unref(rmesa
->dma
.current
);
184 rmesa
->dma
.current
= 0;
188 rmesa
->dma
.current
= radeon_bo_open(rmesa
->radeonScreen
->bom
,
189 0, size
, 4, RADEON_GEM_DOMAIN_GTT
,
192 if (!rmesa
->dma
.current
) {
193 rcommonFlushCmdBuf(rmesa
, __FUNCTION__
);
194 rmesa
->dma
.nr_released_bufs
= 0;
198 rmesa
->dma
.current_used
= 0;
199 rmesa
->dma
.current_vertexptr
= 0;
201 radeon_validate_bo(rmesa
, rmesa
->dma
.current
, RADEON_GEM_DOMAIN_GTT
, 0);
203 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
)
204 fprintf(stderr
,"failure to revalidate BOs - badness\n");
206 radeon_bo_map(rmesa
->dma
.current
, 1);
209 /* Allocates a region from rmesa->dma.current. If there isn't enough
210 * space in current, grab a new buffer (and discard what was left of current)
212 void radeonAllocDmaRegion(radeonContextPtr rmesa
,
213 struct radeon_bo
**pbo
, int *poffset
,
214 int bytes
, int alignment
)
216 if (RADEON_DEBUG
& DEBUG_IOCTL
)
217 fprintf(stderr
, "%s %d\n", __FUNCTION__
, bytes
);
219 if (rmesa
->dma
.flush
)
220 rmesa
->dma
.flush(rmesa
->glCtx
);
222 assert(rmesa
->dma
.current_used
== rmesa
->dma
.current_vertexptr
);
225 rmesa
->dma
.current_used
= (rmesa
->dma
.current_used
+ alignment
) & ~alignment
;
227 if (!rmesa
->dma
.current
|| rmesa
->dma
.current_used
+ bytes
> rmesa
->dma
.current
->size
)
228 radeonRefillCurrentDmaRegion(rmesa
, (bytes
+ 15) & ~15);
230 *poffset
= rmesa
->dma
.current_used
;
231 *pbo
= rmesa
->dma
.current
;
234 /* Always align to at least 16 bytes */
235 rmesa
->dma
.current_used
= (rmesa
->dma
.current_used
+ bytes
+ 15) & ~15;
236 rmesa
->dma
.current_vertexptr
= rmesa
->dma
.current_used
;
238 assert(rmesa
->dma
.current_used
<= rmesa
->dma
.current
->size
);
241 void radeonReleaseDmaRegion(radeonContextPtr rmesa
)
243 if (RADEON_DEBUG
& DEBUG_IOCTL
)
244 fprintf(stderr
, "%s %p\n", __FUNCTION__
, rmesa
->dma
.current
);
245 if (rmesa
->dma
.current
) {
246 rmesa
->dma
.nr_released_bufs
++;
247 radeon_bo_unmap(rmesa
->dma
.current
);
248 radeon_bo_unref(rmesa
->dma
.current
);
250 rmesa
->dma
.current
= NULL
;
254 /* Flush vertices in the current dma region.
256 void rcommon_flush_last_swtcl_prim( GLcontext
*ctx
)
258 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
259 struct radeon_dma
*dma
= &rmesa
->dma
;
262 if (RADEON_DEBUG
& DEBUG_IOCTL
)
263 fprintf(stderr
, "%s %p\n", __FUNCTION__
, dma
->current
);
267 GLuint current_offset
= dma
->current_used
;
269 assert (dma
->current_used
+
270 rmesa
->swtcl
.numverts
* rmesa
->swtcl
.vertex_size
* 4 ==
271 dma
->current_vertexptr
);
273 if (dma
->current_used
!= dma
->current_vertexptr
) {
274 dma
->current_used
= dma
->current_vertexptr
;
276 rmesa
->vtbl
.swtcl_flush(ctx
, current_offset
);
278 rmesa
->swtcl
.numverts
= 0;
281 /* Alloc space in the current dma region.
284 rcommonAllocDmaLowVerts( radeonContextPtr rmesa
, int nverts
, int vsize
)
286 GLuint bytes
= vsize
* nverts
;
289 if (!rmesa
->dma
.current
|| rmesa
->dma
.current_vertexptr
+ bytes
> rmesa
->dma
.current
->size
) {
290 radeonRefillCurrentDmaRegion(rmesa
, bytes
);
293 if (!rmesa
->dma
.flush
) {
294 /* make sure we have enough space to use this in cmdbuf */
295 rcommonEnsureCmdBufSpace(rmesa
,
296 rmesa
->hw
.max_state_size
+ (12*sizeof(int)),
298 /* if cmdbuf flushed DMA restart */
299 if (!rmesa
->dma
.current
)
301 rmesa
->glCtx
->Driver
.NeedFlush
|= FLUSH_STORED_VERTICES
;
302 rmesa
->dma
.flush
= rcommon_flush_last_swtcl_prim
;
305 ASSERT( vsize
== rmesa
->swtcl
.vertex_size
* 4 );
306 ASSERT( rmesa
->dma
.flush
== rcommon_flush_last_swtcl_prim
);
307 ASSERT( rmesa
->dma
.current_used
+
308 rmesa
->swtcl
.numverts
* rmesa
->swtcl
.vertex_size
* 4 ==
309 rmesa
->dma
.current_vertexptr
);
311 head
= (rmesa
->dma
.current
->ptr
+ rmesa
->dma
.current_vertexptr
);
312 rmesa
->dma
.current_vertexptr
+= bytes
;
313 rmesa
->swtcl
.numverts
+= nverts
;
317 void radeonReleaseArrays( GLcontext
*ctx
, GLuint newinputs
)
319 radeonContextPtr radeon
= RADEON_CONTEXT( ctx
);
322 if (radeon
->tcl
.elt_dma_bo
) {
323 radeon_bo_unref(radeon
->tcl
.elt_dma_bo
);
324 radeon
->tcl
.elt_dma_bo
= NULL
;
326 for (i
= 0; i
< radeon
->tcl
.aos_count
; i
++) {
327 if (radeon
->tcl
.aos
[i
].bo
) {
328 radeon_bo_unref(radeon
->tcl
.aos
[i
].bo
);
329 radeon
->tcl
.aos
[i
].bo
= NULL
;