* referred.
*/
boolean preemptive_flush;
+
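+   /* Optional per-context command throttling. throttle_us is honored
+    * only when throttle_set is TRUE; otherwise the winsys-wide
+    * default_throttle_us is used at flush time.
+    */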
+   boolean throttle_set;
+   uint32_t throttle_us;
};
}
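+/**
+ * vmw_translate_to_pb_flags - Translate SVGA relocation flags
+ * (SVGA_RELOC_READ / SVGA_RELOC_WRITE) into the corresponding
+ * pipebuffer validation flags.
+ */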
+static INLINE unsigned
+vmw_translate_to_pb_flags(unsigned flags)
+{
+   unsigned f = 0;
+   if (flags & SVGA_RELOC_READ)
+      f |= PB_USAGE_GPU_READ;
+
+   if (flags & SVGA_RELOC_WRITE)
+      f |= PB_USAGE_GPU_WRITE;
+
+   return f;
+}
+
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
struct pipe_fence_handle *fence = NULL;
unsigned i;
enum pipe_error ret;
+   uint32_t throttle_us;
ret = pb_validate_validate(vswc->validate);
assert(ret == PIPE_OK);
*reloc->where = ptr;
}
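+   /* Use the explicitly set per-context throttle value if there is
+    * one; otherwise fall back to the winsys default.
+    */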
+   throttle_us = vswc->throttle_set ?
+      vswc->throttle_us : vswc->vws->default_throttle_us;
+
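+   /* Submit the command buffer, passing the context id and the
+    * computed throttle value down to the kernel.
+    */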
   if (vswc->command.used)
      vmw_ioctl_command(vswc->vws,
+                       vswc->base.cid,
+                       throttle_us,
                        vswc->command.buffer,
                        vswc->command.used,
                        &vswc->last_fence);
{
struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
struct vmw_region_relocation *reloc;
+   unsigned translated_flags;
enum pipe_error ret;
assert(vswc->region.staged < vswc->region.reserved);
++vswc->region.staged;
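+   /* SVGA relocation flags must be translated to pipebuffer usage
+    * flags before the buffer is added to the validate list.
+    */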
-   ret = pb_validate_add_buffer(vswc->validate, reloc->buffer, flags);
+   translated_flags = vmw_translate_to_pb_flags(flags);
+   ret = pb_validate_add_buffer(vswc->validate, reloc->buffer, translated_flags);
/* TODO: Update pipebuffer to reserve buffers and not fail here */
assert(ret == PIPE_OK);
* SVGA virtual device it's not a performance issue since flushing commands
* to the FIFO won't cause flushing in the host.
*/
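+   /* Be conservative and trigger a preemptive flush once a third of
+    * the GMR pool is referenced.
+    */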
-   vswc->seen_regions += reloc->buffer->base.size;
-   if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/2)
+   vswc->seen_regions += reloc->buffer->size;
+   if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/3)
vswc->preemptive_flush = TRUE;
}
}
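+/**
+ * vmw_svga_context_set_throttling - Set the command throttling value
+ * for a context, in microseconds. Overrides the winsys-wide
+ * default_throttle_us for subsequent flushes.
+ */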
+void
+vmw_svga_context_set_throttling(struct pipe_context *pipe,
+                                uint32_t throttle_us)
+{
+   struct svga_winsys_context *swc = svga_winsys_context(pipe);
+   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+
+   vswc->throttle_us = throttle_us;
+   vswc->throttle_set = TRUE;
+}