unsigned mapped:1;
unsigned dirty:1;
- unsigned aub_dirty:1;
unsigned alignment:13;
unsigned flags:16;
struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
/* then to pool->lru or free() */
+ unsigned ctxId;
unsigned last_fence;
unsigned free_on_hardware;
}
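/* Lifecycle sketch, inferred from the fields and comments above: a
 * block moves from bm->referenced to bm->on_hardware when validated,
 * to bm->fenced once bmFenceBuffers emits its mi_flush/irq/dword
 * write, and finally back to its pool's lru list -- or straight to
 * free() if free_on_hardware was set while the hardware still owned
 * it.
 */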
-static int evict_lru( struct intel_context *intel, GLuint max_fence )
+static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
block->buf->block = NULL;
free_block(intel, block);
+ *pool = i;
return 1;
}
}
#define foreach_s_rev(ptr, t, list) \
for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
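/* Usage sketch (illustrative, assuming the same list heads foreach_s
 * uses): walk a block list from the tail, i.e. most-recently-used
 * first, with a lookahead pointer so the current node can be freed
 * mid-walk:
 *
 *    struct block *block, *tmp;
 *    foreach_s_rev(block, tmp, &pool->lru) {
 *       ...free_block(intel, block) is safe here...
 *    }
 */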
-static int evict_mru( struct intel_context *intel)
+static int evict_mru( struct intel_context *intel, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
block->buf->block = NULL;
free_block(intel, block);
+ *pool = i;
return 1;
}
}
}
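/* Design note: evict_lru() and evict_mru() now report which pool they
 * freed a block from through the *pool out-parameter, so the caller
 * can retry allocation in that specific pool via alloc_from_pool()
 * (see evict_and_alloc_block() below) rather than rescanning them all.
 */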
-
static int check_fenced( struct intel_context *intel )
{
struct bufmgr *bm = intel->bm;
int i;
+ assert(intel->locked);
+
DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);
for (i = 0; i < bm->nr_pools; i++) {
static GLboolean evict_and_alloc_block( struct intel_context *intel,
struct buffer *buf )
{
+ GLuint pool;
struct bufmgr *bm = intel->bm;
assert(buf->block == NULL);
/* Look for memory blocks not used for >1 frame:
*/
- while (evict_lru(intel, intel->second_last_swap_fence))
- if (alloc_block(intel, buf))
+ while (evict_lru(intel, intel->second_last_swap_fence, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
/* If we're not thrashing, allow lru eviction to dig deeper into
* recently used textures. We'll probably be thrashing soon:
*/
if (!intel->thrashing) {
- while (evict_lru(intel, 0))
- if (alloc_block(intel, buf))
+ while (evict_lru(intel, 0, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
}
if (!is_empty_list(&bm->on_hardware)) {
bmSetFence(intel);
- if (!is_empty_list(&bm->fenced)) {
+ while (!is_empty_list(&bm->fenced)) {
GLuint fence = bm->fenced.next->fence;
bmFinishFence(intel, fence);
}
return GL_TRUE;
}
- while (evict_mru(intel))
- if (alloc_block(intel, buf))
+ while (evict_mru(intel, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
+
+ assert(is_empty_list(&bm->on_hardware));
+ assert(is_empty_list(&bm->fenced));
+
return GL_FALSE;
}
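/* Summary of the escalation above: evict blocks idle for more than a
 * frame, then (unless already thrashing) any lru block, then wait for
 * buffers still held by the hardware to retire, and as a last resort
 * evict most-recently-used blocks before reporting failure.
 */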
make_empty_list(&bm.referenced);
make_empty_list(&bm.fenced);
make_empty_list(&bm.on_hardware);
+
+ /* The context id of any context in the share group. This won't be used
+ * in communication with the kernel, so it doesn't matter if
+ * this context is eventually deleted.
+ */
+ bm.ctxId = intel->hHWContext;
}
nr_attach++;
unsigned flags)
{
struct bufmgr *bm = intel->bm;
- int retval;
+ int retval = 0;
LOCK(bm);
{
buf->id = ++bm->buf_nr;
buf->name = name;
- buf->alignment = align ? align : 6;
+ buf->alignment = align;
buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
return buf;
}
+
+void *bmFindVirtual( struct intel_context *intel,
+ unsigned int offset,
+ size_t sz )
+{
+ struct bufmgr *bm = intel->bm;
+ int i;
+
+ for (i = 0; i < bm->nr_pools; i++)
+ if (offset >= bm->pool[i].low_offset &&
+ offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
+ return bm->pool[i].virtual + offset;
+
+ return NULL;
+}
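+
+/* Usage sketch (hypothetical caller): translate a hardware offset back
+ * to a CPU-visible pointer, e.g. for a debug dump; NULL means no pool
+ * covers the range.
+ *
+ *    void *virt = bmFindVirtual(intel, offset, sz);
+ *    if (virt != NULL)
+ *       memcpy(out, virt, sz);
+ */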
void bmGenBuffers(struct intel_context *intel,
/* If buffer size changes, free and reallocate. Otherwise update in
* place.
*/
-void bmBufferData(struct intel_context *intel,
- struct buffer *buf,
- unsigned size,
- const void *data,
- unsigned flags )
+int bmBufferData(struct intel_context *intel,
+ struct buffer *buf,
+ unsigned size,
+ const void *data,
+ unsigned flags )
{
struct bufmgr *bm = intel->bm;
+ int retval = 0;
LOCK(bm);
{
buf->size = size;
if (buf->block) {
- assert (buf->block->mem->size == size);
+ assert (buf->block->mem->size >= size);
}
if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
- if (data != NULL) {
- if (!buf->block && !evict_and_alloc_block(intel, buf))
- assert(0);
+
+ assert(intel->locked || data == NULL);
+
+ if (data != NULL) {
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
wait_quiescent(intel, buf->block);
}
}
}
+ out:
UNLOCK(bm);
+ return retval;
}
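/* Usage sketch (hypothetical caller): bmBufferData now reports
 * allocation failure instead of assert()ing, so callers can react:
 *
 *    if (bmBufferData(intel, buf, size, NULL, 0) != 0) {
 *       ...flush outstanding work, then retry or fall back...
 *    }
 */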
/* Update the buffer in place, in whatever space it is currently resident:
*/
-void bmBufferSubData(struct intel_context *intel,
+int bmBufferSubData(struct intel_context *intel,
struct buffer *buf,
unsigned offset,
unsigned size,
const void *data )
{
struct bufmgr *bm = intel->bm;
+ int retval = 0;
if (size == 0)
- return;
+ return 0;
LOCK(bm);
{
assert(offset+size <= buf->size);
if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
- if (!buf->block && !evict_and_alloc_block(intel, buf))
- assert(0);
+
+ assert(intel->locked);
+
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
if (!(buf->flags & BM_NO_FENCE_SUBDATA))
wait_quiescent(intel, buf->block);
do_memcpy(buf->backing_store + offset, data, size);
}
}
+ out:
UNLOCK(bm);
-}
-
-
-
-void bmBufferDataAUB(struct intel_context *intel,
- struct buffer *buf,
- unsigned size,
- const void *data,
- unsigned flags,
- unsigned aubtype,
- unsigned aubsubtype )
-{
- bmBufferData(intel, buf, size, data, flags);
-
-
- /* This only works because in this version of the buffer manager we
- * allocate all buffers statically in agp space and so can emit the
- * uploads to the aub file with the correct offsets as they happen.
- */
- if (data && intel->aub_file) {
-
- if (buf->block && !buf->dirty) {
- intel->vtbl.aub_gtt_data(intel,
- buf->block->mem->ofs,
- buf->block->virtual,
- size,
- aubtype,
- aubsubtype);
- buf->aub_dirty = 0;
- }
- }
-}
-
-
-void bmBufferSubDataAUB(struct intel_context *intel,
- struct buffer *buf,
- unsigned offset,
- unsigned size,
- const void *data,
- unsigned aubtype,
- unsigned aubsubtype )
-{
- bmBufferSubData(intel, buf, offset, size, data);
-
-
- /* This only works because in this version of the buffer manager we
- * allocate all buffers statically in agp space and so can emit the
- * uploads to the aub file with the correct offsets as they happen.
- */
- if (intel->aub_file) {
- if (buf->block && !buf->dirty)
- intel->vtbl.aub_gtt_data(intel,
- buf->block->mem->ofs + offset,
- ((const char *)buf->block->virtual) + offset,
- size,
- aubtype,
- aubsubtype);
- }
-}
-
-void bmUnmapBufferAUB( struct intel_context *intel,
- struct buffer *buf,
- unsigned aubtype,
- unsigned aubsubtype )
-{
- bmUnmapBuffer(intel, buf);
-
- if (intel->aub_file) {
- /* Hack - exclude the framebuffer mappings. If you removed
- * this, you'd get very big aubfiles, but you *would* be able to
- * see fallback rendering.
- */
- if (buf->block && !buf->dirty && buf->block->pool == &intel->bm->pool[0]) {
- buf->aub_dirty = 1;
- }
- }
+ return retval;
}
unsigned bmBufferOffset(struct intel_context *intel,
struct buffer *buf)
{
struct bufmgr *bm = intel->bm;
- unsigned retval;
+ unsigned retval = 0;
LOCK(bm);
{
unsigned flags )
{
struct bufmgr *bm = intel->bm;
- void *retval;
+ void *retval = NULL;
LOCK(bm);
{
retval = NULL;
}
else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
+
+ assert(intel->locked);
+
if (!buf->block && !evict_and_alloc_block(intel, buf)) {
- _mesa_printf("%s: alloc failed\n", __FUNCTION__);
+ DBG("%s: alloc failed\n", __FUNCTION__);
+ bm->fail = 1;
retval = NULL;
}
else {
}
}
else {
- DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
+ DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
set_dirty(intel, buf);
if (buf->backing_store == 0)
int bmValidateBuffers( struct intel_context *intel )
{
struct bufmgr *bm = intel->bm;
- int retval;
+ int retval = 0;
LOCK(bm);
{
DBG("%s fail %d\n", __FUNCTION__, bm->fail);
+ assert(intel->locked);
if (!bm->fail) {
struct block *block, *tmp;
buf->backing_store,
buf->size);
- if (intel->aub_file) {
- intel->vtbl.aub_gtt_data(intel,
- buf->block->mem->ofs,
- buf->backing_store,
- buf->size,
- 0,
- 0);
- }
-
buf->dirty = 0;
- buf->aub_dirty = 0;
- }
- else if (buf->aub_dirty) {
- intel->vtbl.aub_gtt_data(intel,
- buf->block->mem->ofs,
- buf->block->virtual,
- buf->size,
- 0,
- 0);
- buf->aub_dirty = 0;
}
block->referenced = 0;
bm->need_fence = 1;
}
- retval = !bm->fail;
- bm->fail = 0;
- assert(is_empty_list(&bm->referenced));
+ retval = bm->fail ? -1 : 0;
}
UNLOCK(bm);
+
+ if (retval != 0)
+ DBG("%s failed\n", __FUNCTION__);
+
return retval;
}
block->referenced = 0;
}
-
- bm->fail = 0;
}
UNLOCK(bm);
}
GLuint dword[2];
dword[0] = intel->vtbl.flush_cmd();
dword[1] = 0;
- intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword), GL_TRUE);
+ intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));
intel->bm->last_fence = intelEmitIrqLocked( intel );
return intel->bm->last_fence;
}
+unsigned bmSetFenceLock( struct intel_context *intel )
+{
+ unsigned last;
+ LOCK(intel->bm);
+ last = bmSetFence(intel);
+ UNLOCK(intel->bm);
+ return last;
+}
unsigned bmLockAndFence( struct intel_context *intel )
{
if (intel->bm->need_fence) {
LOCK_HARDWARE(intel);
+ LOCK(intel->bm);
bmSetFence(intel);
+ UNLOCK(intel->bm);
UNLOCK_HARDWARE(intel);
}
check_fenced(intel);
}
-
+void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
+{
+ LOCK(intel->bm);
+ bmFinishFence(intel, fence);
+ UNLOCK(intel->bm);
+}
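+
+/* Usage sketch: the *Lock variants wrap the existing bmSetFence() and
+ * bmFinishFence() for callers that do not already hold the buffer
+ * manager mutex:
+ *
+ *    unsigned fence = bmSetFenceLock(intel);
+ *    ...
+ *    bmFinishFenceLock(intel, fence);   (blocks until fence retires)
+ */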
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
assert(is_empty_list(&bm->referenced));
bm->need_fence = 1;
+ bm->fail = 0;
bmFinishFence(intel, bmSetFence(intel));
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
for (i = 0; i < bm->nr_pools; i++) {
if (!(bm->pool[i].flags & BM_NO_EVICT)) {
foreach_s(block, tmp, &bm->pool[i].lru) {
}
+
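+/* bmEvictAll(): like the contended-lock handling above, but callable
+ * on its own -- it takes the buffer-manager mutex itself, and besides
+ * marking every evictable buffer dirty (forcing a re-upload from
+ * backing store at the next validate) it also frees the underlying
+ * memory blocks.
+ */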
+void bmEvictAll( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ struct block *block, *tmp;
+ GLuint i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ assert(is_empty_list(&bm->referenced));
+
+ bm->need_fence = 1;
+ bm->fail = 0;
+ bmFinishFence(intel, bmSetFence(intel));
+
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s(block, tmp, &bm->pool[i].lru) {
+ assert(bmTestFence(intel, block->fence));
+ set_dirty(intel, block->buf);
+ block->buf->block = NULL;
+
+ free_block(intel, block);
+ }
+ }
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+GLboolean bmError( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+ GLboolean retval;
+
+ LOCK(bm);
+ {
+ retval = bm->fail;
+ }
+ UNLOCK(bm);
+
+ return retval;
+}
+
+
+GLuint bmCtxId( struct intel_context *intel )
+{
+ return intel->bm->ctxId;
+}
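+
+/* Usage sketch (hypothetical caller; names assumed, not from this
+ * patch): lets lock-contention code check whether shared memory was
+ * last touched by our own share group before evicting everything:
+ *
+ *    if (last_texture_stamp != bmCtxId(intel))
+ *       bmEvictAll(intel);
+ */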