#include <errno.h>
#include "radeon_common.h"
+#include "radeon_fog.h"
#include "main/simple_list.h"
#if defined(USE_X86_ASM)
}
}
-void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
+void rcommon_emit_vector(struct gl_context * ctx, struct radeon_aos *aos,
const GLvoid * data, int size, int stride, int count)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
radeon_bo_unmap(aos->bo);
}
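+/* Emit a single-component array of fog blend factors: each incoming fog
+ * coordinate is converted on the CPU with radeonComputeFogBlendFactor()
+ * (hence the radeon_fog.h include above) and streamed into a DMA region.
+ */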
+void rcommon_emit_vecfog(struct gl_context *ctx, struct radeon_aos *aos,
+ GLvoid *data, int stride, int count)
+{
+ int i;
+ float *out;
+ int size = 1;
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d\n",
+ __FUNCTION__, count, stride);
+
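+	/* stride 0 means a constant fog coordinate: emit a single element
+	 * and let aos->stride = 0 replay it for every vertex */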
+ if (stride == 0) {
+ radeonAllocDmaRegion( rmesa, &aos->bo, &aos->offset, size * 4, 32 );
+ count = 1;
+ aos->stride = 0;
+ } else {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
+ aos->stride = size;
+ }
+
+ aos->components = size;
+ aos->count = count;
+
+ /* Emit the data */
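+	/* radeon_bo_map()'s second argument of 1 requests write access */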
+ radeon_bo_map(aos->bo, 1);
+ out = (float*)((char*)aos->bo->ptr + aos->offset);
+ for (i = 0; i < count; i++) {
+ out[0] = radeonComputeFogBlendFactor( ctx, *(GLfloat *)data );
+ out++;
+		data = (char *)data + stride;	/* arithmetic on a GLvoid * is a GCC extension */
+ }
+ radeon_bo_unmap(aos->bo);
+}
+
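+/* radeon_init_dma() is presumably called once at context creation; the
+ * DMA allocator keeps three BO lists (free, wait and reserved), all of
+ * which are walked in the teardown and release paths below. */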
void radeon_init_dma(radeonContextPtr rmesa)
{
make_empty_list(&rmesa->dma.free);
if (size > rmesa->dma.minimum_size)
rmesa->dma.minimum_size = (size + 15) & (~15);
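+	/* the (size + 15) & ~15 above rounds the new minimum up to a
+	 * 16-byte multiple */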
- radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %d\n",
+	radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %zi\n",
__FUNCTION__, size, rmesa->dma.minimum_size);
-
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
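+		/* the free list is empty or its last BO is too small, so
+		 * fall through to allocating a brand-new buffer */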
dma_bo = CALLOC_STRUCT(radeon_dma_bo);
rmesa->dma.current_used = 0;
rmesa->dma.current_vertexptr = 0;
-
+
if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
first_elem(&rmesa->dma.reserved)->bo,
RADEON_GEM_DOMAIN_GTT, 0))
			/* Cmd buf has been flushed in radeon_revalidate_bos */
goto again_alloc;
}
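+	/* map the newly reserved BO for CPU writes up front; it stays
+	 * mapped until it is moved back to the wait list (see the matching
+	 * unmap in the reserved-list walk below) */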
+ radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
}
/* Allocates a region from rmesa->dma.current. If there isn't enough
fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
if (rmesa->dma.flush)
- rmesa->dma.flush(rmesa->glCtx);
+ rmesa->dma.flush(&rmesa->glCtx);
assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
foreach_s(dma_bo, temp, &rmesa->dma.free) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
- FREE(dma_bo);
+ free(dma_bo);
}
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
- FREE(dma_bo);
+ free(dma_bo);
}
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
- FREE(dma_bo);
+ free(dma_bo);
}
}
__FUNCTION__, free, wait, reserved, rmesa->dma.minimum_size);
}
- if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
- /* request updated cs processing information from kernel */
- legacy_track_pending(rmesa->radeonScreen->bom, 0);
- }
	/* Move waiting BOs to the free list.
	 * Sitting on the wait list gives the GPU time to finish with the
	 * data before the buffer is reused. */
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
WARN_ONCE("Leaking dma buffer object!\n");
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
- FREE(dma_bo);
+ free(dma_bo);
continue;
}
/* free objects that are too small to be used because of large request */
if (dma_bo->bo->size < rmesa->dma.minimum_size) {
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
- FREE(dma_bo);
+ free(dma_bo);
continue;
}
- if (!radeon_bo_is_idle(dma_bo->bo))
- continue;
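+		/* the wait list is filled in release order, so once a BO is
+		 * still busy the ones after it are presumably busy too;
+		 * stop scanning rather than poll every remaining BO */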
+ if (!radeon_bo_is_idle(dma_bo->bo)) {
+ break;
+ }
remove_from_list(dma_bo);
dma_bo->expire_counter = expire_at;
insert_at_tail(&rmesa->dma.free, dma_bo);
/* move reserved to wait list */
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
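+		/* release the persistent CPU mapping taken when the BO
+		 * was reserved */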
+ radeon_bo_unmap(dma_bo->bo);
/* free objects that are too small to be used because of large request */
if (dma_bo->bo->size < rmesa->dma.minimum_size) {
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
- FREE(dma_bo);
+ free(dma_bo);
continue;
}
remove_from_list(dma_bo);
break;
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
- FREE(dma_bo);
+ free(dma_bo);
}
}
/* Flush vertices in the current dma region.
*/
-void rcommon_flush_last_swtcl_prim( GLcontext *ctx )
+void rcommon_flush_last_swtcl_prim( struct gl_context *ctx )
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct radeon_dma *dma = &rmesa->dma;
-
+
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
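+	/* clear the callback up front so nothing called below can
+	 * re-enter this function through dma->flush */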
dma->flush = NULL;
if(is_empty_list(&rmesa->dma.reserved)
	      || rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
if (rmesa->dma.flush) {
- rmesa->dma.flush(rmesa->glCtx);
+ rmesa->dma.flush(&rmesa->glCtx);
}
radeonRefillCurrentDmaRegion(rmesa, bytes);
if (!rmesa->dma.flush) {
/* if cmdbuf flushed DMA restart */
- rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
}
return head;
}
-void radeonReleaseArrays( GLcontext *ctx, GLuint newinputs )
+void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs )
{
radeonContextPtr radeon = RADEON_CONTEXT( ctx );
int i;
fprintf(stderr, "%s\n", __FUNCTION__);
if (radeon->dma.flush) {
- radeon->dma.flush(radeon->glCtx);
+ radeon->dma.flush(&radeon->glCtx);
}
for (i = 0; i < radeon->tcl.aos_count; i++) {
if (radeon->tcl.aos[i].bo) {