Make sure 'used' tracks the right value through the whole function,
recomputing it after every advance of batch->ptr. Also, make
intel_batchbuffer_space return GLint so a negative result is
representable in case we overrun the reserved space in the future.
int line)
{
struct intel_context *intel = batch->intel;
int line)
{
struct intel_context *intel = batch->intel;
+ GLuint used = batch->ptr - batch->map;
GLboolean was_locked = intel->locked;
if (used == 0)
GLboolean was_locked = intel->locked;
if (used == 0)
if (!intel->ttm) {
*(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
batch->ptr += 4;
if (!intel->ttm) {
*(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
batch->ptr += 4;
+ used = batch->ptr - batch->map;
}
/* Round batchbuffer usage to 2 DWORDs. */
}
/* Round batchbuffer usage to 2 DWORDs. */
- used = batch->ptr - batch->map;
if ((used & 4) == 0) {
*(GLuint *) (batch->ptr) = 0; /* noop */
batch->ptr += 4;
if ((used & 4) == 0) {
*(GLuint *) (batch->ptr) = 0; /* noop */
batch->ptr += 4;
+ used = batch->ptr - batch->map;
}
/* Mark the end of the buffer. */
*(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END; /* noop */
batch->ptr += 4;
}
/* Mark the end of the buffer. */
*(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END; /* noop */
batch->ptr += 4;
used = batch->ptr - batch->map;
/* Workaround for recursive batchbuffer flushing: If the window is
used = batch->ptr - batch->map;
/* Workaround for recursive batchbuffer flushing: If the window is
+ if (batch->ptr - batch->map > batch->buf->size)
+ _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
+ batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
ret = dri_emit_reloc(batch->buf, read_domains, write_domain,
delta, batch->ptr - batch->map, buffer);
ret = dri_emit_reloc(batch->buf, read_domains, write_domain,
delta, batch->ptr - batch->map, buffer);
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);