aos->components = size;
aos->count = count;
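+ /* Map the BO for CPU writes before filling it; the second radeon_bo_map() argument is the write flag. */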
+ radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
for (i = 0; i < count; i++) {
out[0] = r200ComputeFogBlendFactor( ctx, *(GLfloat *)data );
out++;
data += stride;
}
+ radeon_bo_unmap(aos->bo);
}
/* Emit any changed arrays to new GART memory, re-emit a packet to update the arrays. */
radeonEmitState(&rmesa->radeon);
r200EmitVertexAOS( rmesa,
rmesa->radeon.swtcl.vertex_size,
- first_elem(&rmesa->radeon.dma.reserved)->bo,
+ rmesa->radeon.swtcl.bo,
current_offset);
GLubyte *in = (GLubyte *)src_ptr;
radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
-
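+ /* The DMA region just allocated must be mapped before the CPU can store the converted indices. */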
+ radeon_bo_map(r300->ind_buf.bo, 1);
assert(r300->ind_buf.bo->ptr != NULL);
out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
if (i < mesa_ind_buf->count) {
*out++ = in[i];
}
-
+ radeon_bo_unmap(r300->ind_buf.bo);
#if MESA_BIG_ENDIAN
} else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
GLushort *in = (GLushort *)src_ptr;
radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo,
&r300->ind_buf.bo_offset, size, 4);
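+ /* The big-endian GL_UNSIGNED_SHORT path needs the same map/unmap bracketing. */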
+ radeon_bo_map(r300->ind_buf.bo, 1);
assert(r300->ind_buf.bo->ptr != NULL);
out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
if (i < mesa_ind_buf->count) {
*out++ = in[i];
}
+ radeon_bo_unmap(r300->ind_buf.bo);
#endif
}
radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
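+ /* 32-bit indices need no conversion and are memcpy'd straight into the mapped BO. */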
+ radeon_bo_map(r300->ind_buf.bo, 1);
assert(r300->ind_buf.bo->ptr != NULL);
dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
_mesa_memcpy(dst_ptr, src_ptr, size);
+ radeon_bo_unmap(r300->ind_buf.bo);
r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
r300->ind_buf.count = mesa_ind_buf->count;
}
radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
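+ /* Map the attribute BO so the conversion code below can write the translated vertex data into it. */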
+ radeon_bo_map(attr->bo, 1);
dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
break;
}
+ radeon_bo_unmap(attr->bo);
if (mapped_named_bo) {
ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
}
radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);
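+ /* Map the destination BO before copying data out of the application's buffer object. */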
+ radeon_bo_map(attr->bo, 1);
+
if (!input->BufferObj->Pointer) {
ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
mapped_named_bo = GL_TRUE;
ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
}
+ radeon_bo_unmap(attr->bo);
attr->stride = dst_stride;
}
}
radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
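+ /* Map the per-attribute buffer before the radeonEmitVec* helpers write into it. */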
+ radeon_bo_map(vbuf->attribs[index].bo, 1);
assert(vbuf->attribs[index].bo->ptr != NULL);
dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
switch (vbuf->attribs[index].dwords) {
case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
default: assert(0); break;
}
+ radeon_bo_unmap(vbuf->attribs[index].bo);
}
}
r300EmitCacheFlush(rmesa);
radeonEmitState(&rmesa->radeon);
- r300_emit_scissor(ctx);
+ r300_emit_scissor(ctx);
r300EmitVertexAOS(rmesa,
- rmesa->radeon.swtcl.vertex_size,
- first_elem(&rmesa->radeon.dma.reserved)->bo,
- current_offset);
+ rmesa->radeon.swtcl.vertex_size,
+ rmesa->radeon.swtcl.bo,
+ current_offset);
r300EmitVbufPrim(rmesa,
rmesa->radeon.swtcl.hw_primitive,
radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
sizeof(GLfloat) * input->Size * count, 32);
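+ /* Same bracketing here: map the attribute BO around the CPU-side conversion. */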
+
+ radeon_bo_map(attr->bo, 1);
+
dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
assert(src_ptr != NULL);
break;
}
+ radeon_bo_unmap(attr->bo);
+
if (mapped_named_bo)
{
ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);
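+ /* Map before copying from the (possibly named) vertex buffer object. */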
+ radeon_bo_map(attr->bo, 1);
+
if (!input->BufferObj->Pointer)
{
ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
}
}
+ radeon_bo_unmap(attr->bo);
if (mapped_named_bo)
{
ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
&context->stream_desc[index].bo_offset, size, 32);
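+ /* Map the stream BO; the assert below checks that the mapping yielded a valid CPU pointer. */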
+
+ radeon_bo_map(context->stream_desc[index].bo, 1);
assert(context->stream_desc[index].bo->ptr != NULL);
+
dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
context->stream_desc[index].bo_offset);
switch (context->stream_desc[index].dwords)
{
case 1:
- radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
+ radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
break;
case 2:
radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
assert(0);
break;
}
+ radeon_bo_unmap(context->stream_desc[index].bo);
}
}
radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
&context->ind_buf.bo_offset, size, 4);
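+ /* Index conversion likewise needs the BO mapped while the indices are rewritten. */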
+ radeon_bo_map(context->ind_buf.bo, 1);
assert(context->ind_buf.bo->ptr != NULL);
out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
*out++ = in[i];
}
+ radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
}
else
radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
&context->ind_buf.bo_offset, size, 4);
+ radeon_bo_map(context->ind_buf.bo, 1);
assert(context->ind_buf.bo->ptr != NULL);
out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
{
*out++ = in[i];
}
+ radeon_bo_unmap(context->ind_buf.bo);
#endif
}
radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
&context->ind_buf.bo_offset, size, 4);
+ radeon_bo_map(context->ind_buf.bo, 1);
assert(context->ind_buf.bo->ptr != NULL);
dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
_mesa_memcpy(dst_ptr, src_ptr, size);
+ radeon_bo_unmap(context->ind_buf.bo);
context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
context->ind_buf.count = mesa_ind_buf->count;
GLuint vertex_attr_count;
GLuint emit_prediction;
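+ /* BO backing the current swtcl vertex region; mapped while vertices are written, unmapped and unreferenced at flush. */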
+ struct radeon_bo *bo;
};
#define RADEON_MAX_AOS_ARRAYS 16
aos->components = size;
aos->count = count;
+ radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
switch (size) {
case 1: radeonEmitVec4(out, data, stride, count); break;
assert(0);
break;
}
+ radeon_bo_unmap(aos->bo);
}
void radeon_init_dma(radeonContextPtr rmesa)
__FUNCTION__, size, rmesa->dma.minimum_size);
- /* unmap old reserved bo */
- if (!is_empty_list(&rmesa->dma.reserved))
- radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
-
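+ /* Reserved BOs are no longer kept permanently mapped; each CPU access now maps and unmaps around the write. */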
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
dma_bo = CALLOC_STRUCT(radeon_dma_bo);
/* Cmd buff have been flushed in radeon_revalidate_bos */
goto again_alloc;
}
-
- radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
}
/* Allocates a region from rmesa->dma.current. If there isn't enough space in current, grab a new buffer (and discard what was left of current). */
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
remove_from_list(dma_bo);
- radeon_bo_unmap(dma_bo->bo);
radeon_bo_unref(dma_bo->bo);
FREE(dma_bo);
}
insert_at_tail(&rmesa->dma.free, dma_bo);
}
- /* unmap the last dma region */
- if (!is_empty_list(&rmesa->dma.reserved))
- radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
/* move reserved to wait list */
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
/* free objects that are too small to be used because of large request */
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct radeon_dma *dma = &rmesa->dma;
-
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
dma->flush = NULL;
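+ /* Release the CPU mapping taken when the swtcl vertex region was allocated. */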
+ radeon_bo_unmap(rmesa->swtcl.bo);
+
if (!is_empty_list(&dma->reserved)) {
GLuint current_offset = dma->current_used;
}
rmesa->swtcl.numverts = 0;
}
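+ /* Drop the reference taken when the swtcl BO was picked up at allocation time. */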
+ radeon_bo_unref(rmesa->swtcl.bo);
+ rmesa->swtcl.bo = NULL;
}
/* Alloc space in the current dma region.
*/
void *head;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
+
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
if (rmesa->dma.flush) {
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
rmesa->dma.current_vertexptr );
- head = (first_elem(&rmesa->dma.reserved)->bo->ptr + rmesa->dma.current_vertexptr);
+ /* Ref and map only once per flush cycle; the flush releases exactly one of each. */
+ if (!rmesa->swtcl.bo) {
+ rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
+ radeon_bo_ref(rmesa->swtcl.bo);
+ radeon_bo_map(rmesa->swtcl.bo, 1);
+ }
+
+ head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
rmesa->dma.current_vertexptr += bytes;
rmesa->swtcl.numverts += nverts;
return head;
/* Emit the data
*/
+ radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
for (i = 0; i < count; i++) {
out[0] = radeonComputeFogBlendFactor( ctx, *(GLfloat *)data );
out++;
data += stride;
}
+ radeon_bo_unmap(aos->bo);
}
static void emit_s0_vec(uint32_t *out, GLvoid *data, int stride, int count)
/* Emit the data
*/
+ radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
switch (size) {
case 1:
exit(1);
break;
}
+ radeon_bo_unmap(aos->bo);
}
}
-
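+ /* Map the AOS buffer so setup_tab[i].emit() can write vertices directly into it. */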
+ radeon_bo_map(rmesa->radeon.tcl.aos[0].bo, 1);
setup_tab[i].emit( ctx, 0, VB->Count,
rmesa->radeon.tcl.aos[0].bo->ptr + rmesa->radeon.tcl.aos[0].offset);
-
+ radeon_bo_unmap(rmesa->radeon.tcl.aos[0].bo);
// rmesa->radeon.tcl.aos[0].size = setup_tab[i].vertex_size;
rmesa->radeon.tcl.aos[0].stride = setup_tab[i].vertex_size;
rmesa->tcl.vertex_format = setup_tab[i].vertex_format;
radeonEmitState(&rmesa->radeon);
radeonEmitVertexAOS( rmesa,
rmesa->radeon.swtcl.vertex_size,
- first_elem(&rmesa->radeon.dma.reserved)->bo,
+ rmesa->radeon.swtcl.bo,
current_offset);