}
-GLboolean brw_draw_prims( GLcontext *ctx,
- const struct gl_client_array *arrays[],
- const struct brw_draw_prim *prim,
- GLuint nr_prims,
- const struct brw_draw_index_buffer *ib,
- GLuint min_index,
- GLuint max_index,
- GLuint flags )
+static GLboolean brw_try_draw_prims( GLcontext *ctx,
+ const struct gl_client_array *arrays[],
+ const struct brw_draw_prim *prim,
+ GLuint nr_prims,
+ const struct brw_draw_index_buffer *ib,
+ GLuint min_index,
+ GLuint max_index,
+ GLuint flags )
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
* way around this, as not every flush is due to a buffer filling
* up.
*/
- intel_batchbuffer_flush( brw->intel.batch );
+ if (!intel_batchbuffer_flush( brw->intel.batch )) {
+ DBG("%s intel_batchbuffer_flush failed\n", __FUNCTION__);
+ retval = GL_FALSE;
+ }
- if (intel->thrashing) {
+ if (retval && intel->thrashing) {
bmSetFence(intel);
}
}
UNLOCK_HARDWARE(intel);
+
+ if (!retval)
+ _mesa_printf("%s failed\n", __FUNCTION__);
+
return retval;
}
+
+GLboolean brw_draw_prims( GLcontext *ctx,
+ const struct gl_client_array *arrays[],
+ const struct brw_draw_prim *prim,
+ GLuint nr_prims,
+ const struct brw_draw_index_buffer *ib,
+ GLuint min_index,
+ GLuint max_index,
+ GLuint flags )
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLboolean retval;
+
+ retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, flags);
+
+ if (!retval && bmError(intel)) {
+
+ DBG("retrying\n");
+ /* This looks like out-of-memory but potentially we have a
+ * situation where there is enough memory but it has become
+ * fragmented. Clear out all heaps and start from scratch by
+ * faking a contended lock event: (done elsewhere)
+ */
+
+ /* Then try a second time only to upload textures and draw the
+ * primitives:
+ */
+ retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, flags);
+ }
+
+ return retval;
+}
+
+
static void brw_invalidate_vbo_cb( struct intel_context *intel, void *ptr )
{
/* nothing to do, we don't rely on the contents being preserved */
size = (size + 3) & ~3;
if (pool->offset + fixup + size >= pool->size) {
+ _mesa_printf("%s failed\n", __FUNCTION__);
assert(0);
exit(0);
}
int bit = ffs( ~p->temp_in_use );
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
- abort();
+ assert(0);
}
if (bit > p->program->Base.NumTemporaries)
* client would, so flags here is more proscriptive than the usage
* values in the ARB_vbo interface:
*/
-void bmBufferData(struct intel_context *,
+int bmBufferData(struct intel_context *,
struct buffer *buf,
unsigned size,
const void *data,
unsigned flags );
-void bmBufferSubData(struct intel_context *,
+int bmBufferSubData(struct intel_context *,
struct buffer *buf,
unsigned offset,
unsigned size,
const void *data );
-void bmBufferDataAUB(struct intel_context *,
+int bmBufferDataAUB(struct intel_context *,
struct buffer *buf,
unsigned size,
const void *data,
unsigned aubtype,
unsigned aubsubtype );
-void bmBufferSubDataAUB(struct intel_context *,
+int bmBufferSubDataAUB(struct intel_context *,
struct buffer *buf,
unsigned offset,
unsigned size,
void bmReleaseBuffers( struct intel_context * );
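+
+/* Error recovery: query whether the buffer manager has failed an
+ * allocation, and evict every evictable buffer so allocations can be
+ * retried from empty memory pools:
+ */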
+GLboolean bmError( struct intel_context * );
+void bmEvictAll( struct intel_context * );
+
/* This functionality is used by the buffer manager, not really sure
* if we need to be exposing it in this way, probably libdrm will
* offer equivalent calls.
}
-static int evict_lru( struct intel_context *intel, GLuint max_fence )
+static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
block->buf->block = NULL;
free_block(intel, block);
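+ /* Report which pool the space was freed from so the caller can
+ * allocate from it directly:
+ */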
+ *pool = i;
return 1;
}
}
#define foreach_s_rev(ptr, t, list) \
for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
-static int evict_mru( struct intel_context *intel)
+static int evict_mru( struct intel_context *intel, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
block->buf->block = NULL;
free_block(intel, block);
+ *pool = i;
return 1;
}
}
struct bufmgr *bm = intel->bm;
int i;
+ assert(intel->locked);
+
DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);
for (i = 0; i < bm->nr_pools; i++) {
static GLboolean evict_and_alloc_block( struct intel_context *intel,
struct buffer *buf )
{
+ GLuint pool;
struct bufmgr *bm = intel->bm;
assert(buf->block == NULL);
/* Look for memory blocks not used for >1 frame:
*/
- while (evict_lru(intel, intel->second_last_swap_fence))
- if (alloc_block(intel, buf))
+ while (evict_lru(intel, intel->second_last_swap_fence, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
/* If we're not thrashing, allow lru eviction to dig deeper into
* recently used textures. We'll probably be thrashing soon:
*/
if (!intel->thrashing) {
- while (evict_lru(intel, 0))
- if (alloc_block(intel, buf))
+ while (evict_lru(intel, 0, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
}
if (!is_empty_list(&bm->on_hardware)) {
bmSetFence(intel);
- if (!is_empty_list(&bm->fenced)) {
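+ /* Wait for every outstanding fence rather than just the oldest
+ * one, so every block the hardware was using becomes reclaimable:
+ */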
+ while (!is_empty_list(&bm->fenced)) {
GLuint fence = bm->fenced.next->fence;
bmFinishFence(intel, fence);
}
return GL_TRUE;
}
- while (evict_mru(intel))
- if (alloc_block(intel, buf))
+ while (evict_mru(intel, &pool))
+ if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
+
+ assert(is_empty_list(&bm->on_hardware));
+ assert(is_empty_list(&bm->fenced));
+
return GL_FALSE;
}
/* If buffer size changes, free and reallocate. Otherwise update in
* place.
*/
-void bmBufferData(struct intel_context *intel,
- struct buffer *buf,
- unsigned size,
- const void *data,
- unsigned flags )
+int bmBufferData(struct intel_context *intel,
+ struct buffer *buf,
+ unsigned size,
+ const void *data,
+ unsigned flags )
{
struct bufmgr *bm = intel->bm;
+ int retval = 0;
LOCK(bm);
{
buf->size = size;
if (buf->block) {
- assert (buf->block->mem->size == size);
+ assert (buf->block->mem->size >= size);
}
if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
- if (data != NULL) {
- if (!buf->block && !evict_and_alloc_block(intel, buf))
- assert(0);
+
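+ /* Buffers without backing store must be uploaded straight into
+ * graphics memory, which may require evicting other buffers and
+ * hence holding the hardware lock:
+ */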
+ assert(intel->locked || data == NULL);
+
+ if (data != NULL) {
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
wait_quiescent(intel, buf->block);
}
}
}
+ out:
UNLOCK(bm);
+ return retval;
}
/* Update the buffer in place, in whatever space it is currently resident:
*/
-void bmBufferSubData(struct intel_context *intel,
+int bmBufferSubData(struct intel_context *intel,
struct buffer *buf,
unsigned offset,
unsigned size,
const void *data )
{
struct bufmgr *bm = intel->bm;
+ int retval = 0;
if (size == 0)
- return;
+ return 0;
LOCK(bm);
{
assert(offset+size <= buf->size);
if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
- if (!buf->block && !evict_and_alloc_block(intel, buf))
- assert(0);
+
+ assert(intel->locked);
+
+ if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+ bm->fail = 1;
+ retval = -1;
+ goto out;
+ }
if (!(buf->flags & BM_NO_FENCE_SUBDATA))
wait_quiescent(intel, buf->block);
do_memcpy(buf->backing_store + offset, data, size);
}
}
+ out:
UNLOCK(bm);
+ return retval;
}
-void bmBufferDataAUB(struct intel_context *intel,
+int bmBufferDataAUB(struct intel_context *intel,
struct buffer *buf,
unsigned size,
const void *data,
unsigned aubtype,
unsigned aubsubtype )
{
- bmBufferData(intel, buf, size, data, flags);
+ int retval = bmBufferData(intel, buf, size, data, flags);
/* This only works because in this version of the buffer manager we
* allocate all buffers statically in agp space and so can emit the
* uploads to the aub file with the correct offsets as they happen.
*/
- if (data && intel->aub_file) {
+ if (retval == 0 && data && intel->aub_file) {
if (buf->block && !buf->dirty) {
intel->vtbl.aub_gtt_data(intel,
buf->aub_dirty = 0;
}
}
+
+ return retval;
}
-void bmBufferSubDataAUB(struct intel_context *intel,
+int bmBufferSubDataAUB(struct intel_context *intel,
struct buffer *buf,
unsigned offset,
unsigned size,
unsigned aubtype,
unsigned aubsubtype )
{
- bmBufferSubData(intel, buf, offset, size, data);
+ int retval = bmBufferSubData(intel, buf, offset, size, data);
/* This only works because in this version of the buffer manager we
* uploads to the aub file with the correct offsets as they happen.
*/
if (intel->aub_file) {
- if (buf->block && !buf->dirty)
+ if (retval == 0 && buf->block && !buf->dirty)
intel->vtbl.aub_gtt_data(intel,
buf->block->mem->ofs + offset,
((const char *)buf->block->virtual) + offset,
aubtype,
aubsubtype);
}
+
+ return retval;
}
void bmUnmapBufferAUB( struct intel_context *intel,
retval = NULL;
}
else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
+
+ assert(intel->locked);
+
if (!buf->block && !evict_and_alloc_block(intel, buf)) {
_mesa_printf("%s: alloc failed\n", __FUNCTION__);
+ bm->fail = 1;
retval = NULL;
}
else {
LOCK(bm);
{
DBG("%s fail %d\n", __FUNCTION__, bm->fail);
+ assert(intel->locked);
if (!bm->fail) {
struct block *block, *tmp;
}
retval = !bm->fail;
- bm->fail = 0;
- assert(is_empty_list(&bm->referenced));
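+ /* bm->fail stays set until one of the recovery paths (bmEvictAll
+ * or the contended-lock handler) clears it:
+ */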
}
UNLOCK(bm);
+
+ if (!retval)
+ _mesa_printf("%s failed\n", __FUNCTION__);
+
return retval;
}
LOCK(bm);
{
struct block *block, *tmp;
+ assert(intel->locked);
foreach_s (block, tmp, &bm->referenced) {
block->referenced = 0;
}
-
- bm->fail = 0;
}
UNLOCK(bm);
}
assert(is_empty_list(&bm->referenced));
bm->need_fence = 1;
+ bm->fail = 0;
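+ /* Emit a fence and wait for it so the hardware is idle before
+ * every evictable buffer is marked dirty:
+ */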
+ bmFinishFence(intel, bmSetFence(intel));
+
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
+ for (i = 0; i < bm->nr_pools; i++) {
+ if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+ foreach_s(block, tmp, &bm->pool[i].lru) {
+ assert(bmTestFence(intel, block->fence));
+ set_dirty(intel, block->buf);
+ }
+ }
+ }
+ }
+ UNLOCK(bm);
+}
+
+
+
+void bmEvictAll( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+
+ LOCK(bm);
+ {
+ struct block *block, *tmp;
+ GLuint i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ assert(is_empty_list(&bm->referenced));
+
+ bm->need_fence = 1;
+ bm->fail = 0;
bmFinishFence(intel, bmSetFence(intel));
+ assert(is_empty_list(&bm->fenced));
+ assert(is_empty_list(&bm->on_hardware));
+
for (i = 0; i < bm->nr_pools; i++) {
if (!(bm->pool[i].flags & BM_NO_EVICT)) {
foreach_s(block, tmp, &bm->pool[i].lru) {
assert(bmTestFence(intel, block->fence));
set_dirty(intel, block->buf);
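+ /* Unlike the contended-lock path, actually release the memory
+ * so subsequent allocations start from empty pools:
+ */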
+ block->buf->block = NULL;
+ free_block(intel, block);
}
}
}
}
+GLboolean bmError( struct intel_context *intel )
+{
+ struct bufmgr *bm = intel->bm;
+ GLboolean retval;
+
+ LOCK(bm);
+ {
+ retval = bm->fail;
+ }
+ UNLOCK(bm);
+
+ return retval;
+}
batch->ptr = NULL;
}
-void intel_batchbuffer_map( struct intel_batchbuffer *batch )
+GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch )
{
if (!batch->map) {
batch->map = bmMapBuffer(batch->intel, batch->buffer,
BM_MEM_AGP|BM_MEM_LOCAL|BM_CLIENT|BM_WRITE);
batch->ptr += (unsigned long)batch->map;
}
- assert(batch->map);
+
+ return batch->map;
}
void intel_batchbuffer_unmap( struct intel_batchbuffer *batch )
#define MI_BATCH_BUFFER_END (0xA<<23)
-void intel_batchbuffer_flush( struct intel_batchbuffer *batch )
+GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch )
{
struct intel_context *intel = batch->intel;
GLuint used = batch->ptr - (batch->map + batch->offset);
GLuint offset;
GLboolean ignore_cliprects = (batch->flags & INTEL_BATCH_CLIPRECTS) ? GL_FALSE : GL_TRUE;
+ GLboolean retval = GL_TRUE;
assert(intel->locked);
if (used == 0) {
bmReleaseBuffers( batch->intel );
- return;
+ return GL_TRUE;
}
/* Throw away non-effective packets.
sched_yield();
LOCK_HARDWARE(intel);
- return;
+ return GL_TRUE;
}
offset = bmBufferOffset(batch->intel, batch->buffer);
if (!bmValidateBuffers( batch->intel )) {
- assert(0);
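+ /* Validation can fail when memory is exhausted; release the
+ * buffer references and report the failure instead of dying:
+ */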
+ assert(intel->locked);
+ bmReleaseBuffers( batch->intel );
+ retval = GL_FALSE;
+ goto out;
}
/* Reset the buffer:
*/
+ out:
intel_batchbuffer_reset( batch );
intel_batchbuffer_map( batch );
+
+ if (!retval)
+ DBG("%s failed\n", __FUNCTION__);
+
+ return retval;
}
void intel_batchbuffer_free( struct intel_batchbuffer *batch );
-void intel_batchbuffer_flush( struct intel_batchbuffer *batch );
+GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch );
void intel_batchbuffer_unmap( struct intel_batchbuffer *batch );
-void intel_batchbuffer_map( struct intel_batchbuffer *batch );
+GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch );
/* Unlike bmBufferData, this currently requires the buffer be mapped.
return GL_TRUE;
}
+
+static void lost_hardware( struct intel_context *intel )
+{
+ bm_fake_NotifyContendedLockTake( intel );
+ intel->vtbl.lost_hardware( intel );
+}
+
static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
*/
if (sarea->ctxOwner != me) {
sarea->ctxOwner = me;
-
- /* Should also fence the frontbuffer even if ctxOwner doesn't
- * change:
- */
- bm_fake_NotifyContendedLockTake( intel );
-
-
- /*
- */
- intel->vtbl.lost_hardware( intel );
+ lost_hardware(intel);
}
/* Drawable changed?
intel->locked = 1;
if (intel->aub_wrap) {
- /* Should also fence the frontbuffer even if ctxOwner doesn't
- * change:
- */
bm_fake_NotifyContendedLockTake( intel );
-
- /*
- */
intel->vtbl.lost_hardware( intel );
intel->vtbl.aub_wrap(intel);
-
intel->aub_wrap = 0;
}
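+
+ /* A previous allocation failed: throw out all buffer-manager
+ * memory and make the driver re-emit its state.  This is the
+ * recovery step that the retry in brw_draw_prims() relies on:
+ */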
+ if (bmError(intel)) {
+ bmEvictAll(intel);
+ intel->vtbl.lost_hardware( intel );
+ }
/* Make sure nothing has been emitted prior to getting the lock:
*/
/* XXX: postpone, may not be needed:
*/
- intel_batchbuffer_map(intel->batch);
+ if (!intel_batchbuffer_map(intel->batch)) {
+ bmEvictAll(intel);
+ intel->vtbl.lost_hardware( intel );
+
+ /* This could only fail if the batchbuffer was greater in size
+ * than the available texture memory:
+ */
+ if (!intel_batchbuffer_map(intel->batch)) {
+ _mesa_printf("double failure to map batchbuffer\n");
+ assert(0);
+ }
+ }
}
/* Upload data for a particular image.
*/
-void intel_miptree_image_data(struct intel_context *intel,
- struct intel_mipmap_tree *dst,
- GLuint face,
- GLuint level,
- const void *src,
- GLuint src_row_pitch,
- GLuint src_image_pitch)
+GLboolean intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ const void *src,
+ GLuint src_row_pitch,
+ GLuint src_image_pitch)
{
GLuint depth = dst->level[level].depth;
GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
DBG("%s\n", __FUNCTION__);
for (i = 0; i < depth; i++) {
- intel_region_data(intel,
- dst->region,
- dst_offset + dst_depth_offset[i],
- 0,
- 0,
- src,
- src_row_pitch,
- 0, 0, /* source x,y */
- dst->level[level].width,
- dst->level[level].height);
+ if (!intel_region_data(intel,
+ dst->region,
+ dst_offset + dst_depth_offset[i],
+ 0,
+ 0,
+ src,
+ src_row_pitch,
+ 0, 0, /* source x,y */
+ dst->level[level].width,
+ dst->level[level].height))
+ return GL_FALSE;
src += src_image_pitch;
}
+ return GL_TRUE;
}
/* Upload an image into a tree
*/
-void intel_miptree_image_data(struct intel_context *intel,
- struct intel_mipmap_tree *dst,
- GLuint face,
- GLuint level,
- const void *src,
- GLuint src_row_pitch,
- GLuint src_image_pitch);
+GLboolean intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ const void *src,
+ GLuint src_row_pitch,
+ GLuint src_image_pitch);
/* i915_mipmap_tree.c:
*/
DBG("%s\n", __FUNCTION__);
if (!region->map_refcount++) {
region->map = bmMapBuffer(intel, region->buffer, 0);
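+ /* The map can fail when memory is exhausted; back out the
+ * refcount so a later attempt starts clean:
+ */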
+ if (!region->map)
+ region->map_refcount--;
}
return region->map;
*
* Currently always memcpy.
*/
-void intel_region_data(struct intel_context *intel,
- struct intel_region *dst,
- GLuint dst_offset,
- GLuint dstx, GLuint dsty,
- const void *src, GLuint src_pitch,
- GLuint srcx, GLuint srcy,
- GLuint width, GLuint height)
+GLboolean intel_region_data(struct intel_context *intel,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ const void *src, GLuint src_pitch,
+ GLuint srcx, GLuint srcy,
+ GLuint width, GLuint height)
{
-
DBG("%s\n", __FUNCTION__);
if (width == dst->pitch &&
srcx == 0 &&
srcy == 0)
{
- bmBufferDataAUB(intel,
- dst->buffer,
- dst->cpp * width * dst->height,
- src,
- 0,
- 0, /* DW_NOTYPE */
- 0);
+ return (bmBufferDataAUB(intel,
+ dst->buffer,
+ dst->cpp * width * dst->height,
+ src, 0, 0, 0) == 0);
}
else {
- assert (dst_offset + dstx + width +
- (dsty + height - 1) * dst->pitch * dst->cpp <=
- dst->pitch * dst->cpp * dst->height);
-
- _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
- dst->cpp,
- dst->pitch,
- dstx, dsty,
- width, height,
- src,
- src_pitch,
- srcx, srcy);
-
- intel_region_unmap(intel, dst);
+ GLubyte *map = intel_region_map(intel, dst);
+
+ if (map) {
+ assert (dst_offset + dstx + width +
+ (dsty + height - 1) * dst->pitch * dst->cpp <=
+ dst->pitch * dst->cpp * dst->height);
+
+ _mesa_copy_rect(map + dst_offset,
+ dst->cpp,
+ dst->pitch,
+ dstx, dsty,
+ width, height,
+ src,
+ src_pitch,
+ srcx, srcy);
+
+ intel_region_unmap(intel, dst);
+ return GL_TRUE;
+ }
+ else
+ return GL_FALSE;
}
}
/* Upload data to a rectangular sub-region
*/
-void intel_region_data(struct intel_context *intel,
- struct intel_region *dest,
- GLuint dest_offset,
- GLuint destx, GLuint desty,
- const void *src, GLuint src_stride,
- GLuint srcx, GLuint srcy,
- GLuint width, GLuint height);
+GLboolean intel_region_data(struct intel_context *intel,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ const void *src, GLuint src_stride,
+ GLuint srcx, GLuint srcy,
+ GLuint width, GLuint height);
/* Copy rectangular sub-regions
*/
intelObj->lastLevel = lastLevel;
}
-static void copy_image_data_to_tree( struct intel_context *intel,
- struct intel_texture_object *intelObj,
- struct gl_texture_image *texImage,
- GLuint face,
- GLuint level)
+static GLboolean copy_image_data_to_tree( struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct gl_texture_image *texImage,
+ GLuint face,
+ GLuint level)
{
- intel_miptree_image_data(intel,
- intelObj->mt,
- face,
- level,
- texImage->Data,
- texImage->RowStride,
- (texImage->RowStride *
- texImage->Height *
- texImage->TexFormat->TexelBytes));
+ return intel_miptree_image_data(intel,
+ intelObj->mt,
+ face,
+ level,
+ texImage->Data,
+ texImage->RowStride,
+ (texImage->RowStride *
+ texImage->Height *
+ texImage->TexFormat->TexelBytes));
}
static void intel_texture_invalidate( struct intel_texture_object *intelObj )
struct gl_texture_object *tObj )
{
struct intel_texture_object *intelObj = intel_texture_object(tObj);
-
GLuint face, i;
GLuint nr_faces = 0;
struct gl_texture_image *firstImage;
i,
texImage->Data);
- copy_image_data_to_tree(intel,
- intelObj,
- texImage,
- face,
- i);
+ if (!copy_image_data_to_tree(intel,
+ intelObj,
+ texImage,
+ face,
+ i))
+ return GL_FALSE;
}
}
- intelObj->dirty_images[face] = 0;
}
}
+ /* Only clear the dirty flags if everything went ok:
+ */
+ for (face = 0; face < nr_faces; face++) {
+ intelObj->dirty_images[face] = 0;
+ }
+
intelObj->dirty = 0;
}