int cpu_index;
/* Return the number of CPU metrics we support. */
- pipe_mutex_lock(gcpufreq_mutex);
+ mtx_lock(&gcpufreq_mutex);
if (gcpufreq_count) {
pipe_mutex_unlock(gcpufreq_mutex);
return gcpufreq_count;
char name[64];
/* Return the number of block devices and partitions. */
- pipe_mutex_lock(gdiskstat_mutex);
+ mtx_lock(&gdiskstat_mutex);
if (gdiskstat_count) {
pipe_mutex_unlock(gdiskstat_mutex);
return gdiskstat_count;
char name[64];
/* Return the number of network interfaces. */
- pipe_mutex_lock(gnic_mutex);
+ mtx_lock(&gnic_mutex);
if (gnic_count) {
pipe_mutex_unlock(gnic_mutex);
return gnic_count;
hud_get_num_sensors(bool displayhelp)
{
/* Return the number of sensors detected. */
- pipe_mutex_lock(gsensor_temp_mutex);
+ mtx_lock(&gsensor_temp_mutex);
if (gsensors_temp_count) {
pipe_mutex_unlock(gsensor_temp_mutex);
return gsensors_temp_count;
return 0;
}
-#define pipe_mutex_lock(mutex) \
- (void) mtx_lock(&(mutex))
-
#define pipe_mutex_unlock(mutex) \
(void) mtx_unlock(&(mutex))
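/* A minimal sketch of the locking shape this conversion leaves behind:
 * call sites now take the C11 mtx_t address directly with mtx_lock(),
 * while unlocking still goes through the remaining pipe_mutex_unlock()
 * wrapper. All example_* names below are hypothetical. */
static mtx_t example_mutex;
static int example_count;

static int
example_get_count(void)
{
   mtx_lock(&example_mutex);
   if (!example_count)
      example_count = 1;   /* lazy init under the lock, as in the hud code */
   pipe_mutex_unlock(example_mutex);
   return example_count;
}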
static inline void pipe_barrier_wait(pipe_barrier *barrier)
{
- pipe_mutex_lock(barrier->mutex);
+ mtx_lock(&barrier->mutex);
assert(barrier->waiters < barrier->count);
barrier->waiters++;
static inline void
pipe_semaphore_signal(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
sema->counter++;
cnd_signal(&sema->cond);
pipe_mutex_unlock(sema->mutex);
static inline void
pipe_semaphore_wait(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
while (sema->counter <= 0) {
cnd_wait(&sema->cond, &sema->mutex);
}
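/* Hedged usage sketch for the semaphore pair above: the wait side loops
 * on cnd_wait() so spurious wakeups re-check the counter before
 * proceeding; the signal side increments and wakes one waiter.
 * example_sem is hypothetical and assumed already initialized. */
static pipe_semaphore example_sem;

static void
example_consumer(void)
{
   pipe_semaphore_wait(&example_sem);    /* blocks until counter > 0 */
   /* ... consume one unit of work ... */
}

static void
example_producer(void)
{
   pipe_semaphore_signal(&example_sem);  /* counter++, then cnd_signal() */
}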
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if (fenced_buf->mapcount) {
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if (!vl) {
/* Invalidate. */
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* This should only be called when the buffer is validated. Typically
* when processing relocations.
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Try to create GPU storage without stalling. */
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
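/* Sketch of the drain loop above, filling in the unlock the hunk elides
 * (an assumption implied by the re-lock shown): the manager mutex is
 * dropped around sched_yield() so other threads can retire fences, then
 * re-taken before checking for newly signalled buffers. */
while (fenced_mgr->num_fenced) {
   pipe_mutex_unlock(fenced_mgr->mutex);   /* let fence signallers in */
   sched_yield();                          /* on Linux/BSD/Solaris */
   mtx_lock(&fenced_mgr->mutex);
   while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
}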
pb_debug_buffer_check(buf);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
if (!map)
return NULL;
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
debug_printf("last map backtrace is\n");
if(!buf->buffer) {
FREE(buf);
#if 0
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
(void) mtx_init(&buf->mutex, mtx_plain);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
if(!LIST_IS_EMPTY(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
assert(!pipe_is_referenced(&mm_buf->base.reference));
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
return NULL;
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmDestroy(mm->heap);
assert(!pipe_is_referenced(&pool_buf->base.reference));
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
/* XXX: it will be necessary to remap here to propagate flush_ctx */
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
return map;
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
FREE(pool->bufs);
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
/* Create a new slab, if we run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
struct pb_buffer *buf = entry->buffer;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->reference));
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
int ret = 0;
struct list_head *cache = &mgr->buckets[bucket_index];
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
entry = NULL;
cur = cache->next;
struct pb_cache_entry *buf;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
struct list_head *cache = &mgr->buckets[i];
group_index = heap * slabs->num_orders + (order - slabs->min_order);
group = &slabs->groups[group_index];
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADD(&slab->head, &group->slabs);
}
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
pipe_mutex_unlock(slabs->mutex);
}
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
pipe_mutex_unlock(slabs->mutex);
}
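/* Hedged note on the design above: pb_slab_free() is O(1) and never
 * frees inline; it parks the entry on slabs->reclaim under the mutex,
 * and actual reclamation happens later through pb_slabs_reclaim() or
 * during allocation when a group runs out of partial slabs. A
 * hypothetical caller that frees and trims eagerly: */
static void
example_free_and_trim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   pb_slab_free(slabs, entry);   /* list append under slabs->mutex */
   pb_slabs_reclaim(slabs);      /* retire entries reported reclaimable */
}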
struct mem_block *block = NULL;
void *addr = NULL;
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (!init_heap())
goto bail;
void
rtasm_exec_free(void *addr)
{
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (exec_heap) {
struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
goto out_no_ref_hash;
fctx->bt_depth = bt_depth;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
pipe_mutex_unlock(list_mutex);
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped) {
debug_flush_alert("Recursive map detected.", "Map",
2, fbuf->bt_depth, TRUE, TRUE, NULL);
if (mapped_sync) {
struct debug_flush_ctx *fctx;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
struct debug_flush_item *item =
util_hash_table_get(fctx->ref_hash, fbuf);
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (!fbuf->mapped)
debug_flush_alert("Unmap not previously mapped detected.", "Map",
2, fbuf->bt_depth, FALSE, TRUE, NULL);
item = util_hash_table_get(fctx->ref_hash, fbuf);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert("Reference of mapped buffer detected.", "Reference",
2, fctx->bt_depth, TRUE, TRUE, NULL);
util_snprintf(message, sizeof(message),
"%s referenced mapped buffer detected.", reason);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
ftr = footer_from_header(hdr);
ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_ADDTAIL(&hdr->head, &list);
pipe_mutex_unlock(list_mutex);
/* set freed memory to special value */
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_DEL(&hdr->head);
pipe_mutex_unlock(list_mutex);
hdr->magic = 0;
new_ftr = footer_from_header(new_hdr);
new_ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_REPLACE(&old_hdr->head, &new_hdr->head);
pipe_mutex_unlock(list_mutex);
}
#endif
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
if (!serials_hash)
serials_hash = util_hash_table_create(hash_ptr, compare_ptr);
static void
debug_serial_delete(void *p)
{
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
util_hash_table_remove(serials_hash, p);
pipe_mutex_unlock(serials_mutex);
}
}
#endif
- pipe_mutex_lock(symbols_mutex);
+ mtx_lock(&symbols_mutex);
if(!symbols_hash)
symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
name = util_hash_table_get(symbols_hash, (void*)addr);
{
struct util_queue *iter;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
/* Wait for all queues to assert idle. */
LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
util_queue_killall_and_wait(iter);
{
call_once(&atexit_once_flag, global_init);
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_ADD(&queue->head, &queue_list);
pipe_mutex_unlock(exit_mutex);
}
{
struct util_queue *iter, *tmp;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
if (iter == queue) {
LIST_DEL(&iter->head);
static void
util_queue_fence_signal(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
fence->signalled = true;
cnd_broadcast(&fence->cond);
pipe_mutex_unlock(fence->mutex);
void
util_queue_fence_wait(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
while (!fence->signalled)
cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
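/* Usage sketch for the fence pair above: the while loop in _wait()
 * re-checks signalled under the mutex, guarding against spurious
 * cnd_wait() wakeups; _signal() flips the flag and broadcasts so every
 * waiter wakes. Initialization of example_fence is assumed elsewhere. */
static struct util_queue_fence example_fence;

static void
example_wait_for_job(void)
{
   util_queue_fence_wait(&example_fence);   /* returns once signalled */
}
/* ... and a worker thread completes it with:
 *    util_queue_fence_signal(&example_fence);                        */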
while (1) {
struct util_queue_job job;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* wait if the queue is empty */
}
/* signal remaining jobs before terminating */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
while (queue->jobs[queue->read_idx].job) {
util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
unsigned i;
/* Signal all threads to terminate. */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
queue->kill_threads = 1;
cnd_broadcast(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
assert(fence->signalled);
fence->signalled = false;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* if the queue is full, wait until there is space */
util_range_add(struct util_range *range, unsigned start, unsigned end)
{
if (start < range->start || end > range->end) {
- pipe_mutex_lock(range->write_mutex);
+ mtx_lock(&range->write_mutex);
range->start = MIN2(start, range->start);
range->end = MAX2(end, range->end);
pipe_mutex_unlock(range->write_mutex);
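/* Note on util_range_add() above: the bounds comparison runs unlocked
 * as a fast path, and write_mutex is only taken when the range actually
 * grows; MIN2/MAX2 are recomputed under the lock, so two racing growers
 * both re-read the bounds and neither update is lost. */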
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* make sure we don't request an impossible amount of space
*/
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* Get next ring entry:
*/
struct pipe_context *pipe = dctx->pipe;
if (dctx->thread) {
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
dctx->kill_thread = 1;
pipe_mutex_unlock(dctx->mutex);
pipe_thread_wait(dctx->thread);
struct dd_context *dctx = (struct dd_context *)input;
struct dd_screen *dscreen = dd_screen(dctx->base.screen);
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
while (!dctx->kill_thread) {
struct dd_draw_record **record = &dctx->records;
/* Unlock and sleep before starting all over again. */
pipe_mutex_unlock(dctx->mutex);
os_time_sleep(10000); /* 10 ms */
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
}
/* Thread termination. */
dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
/* Add the record to the list. */
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
record->next = dctx->records;
dctx->records = record;
pipe_mutex_unlock(dctx->mutex);
static void
batch_reset_resources(struct fd_batch *batch)
{
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
batch_reset_resources_locked(batch);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
util_copy_framebuffer_state(&batch->framebuffer, NULL);
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, true);
pipe_mutex_unlock(batch->ctx->screen->lock);
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, false);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
DBG("%p: flush forced on %p!", batch, dep);
pipe_mutex_unlock(batch->ctx->screen->lock);
fd_batch_flush(dep, false);
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
pipe_mutex_unlock(ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
}
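/* Note on the lock dance above: screen->lock must not be held across
 * fd_batch_flush(), so the loop drops it, flushes, and re-takes it; a
 * reference taken via fd_batch_reference() pins the batch across the
 * unlocked window so it cannot be destroyed mid-iteration. */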
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
struct fd_screen *screen = fd_screen(rsc->base.b.screen);
struct fd_batch *batch;
- pipe_mutex_lock(screen->lock);
+ mtx_lock(&screen->lock);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
struct fd_batch *batch;
uint32_t idx;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
pipe_mutex_unlock(ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
if (!batch)
return NULL;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
static inline void
fd_context_lock(struct fd_context *ctx)
{
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
}
static inline void
* Figure out the buffers/features we need:
*/
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
if (fd_depth_enabled(ctx)) {
buffers |= FD_BUFFER_DEPTH;
batch->resolve |= buffers;
batch->needs_flush = true;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)
*/
fd_bc_invalidate_resource(rsc, false);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
/* Swap the backing bo's, so shadow becomes the old buffer,
* blit from shadow to new buffer. From here on out, we
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, fence->id);
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
fence->count++;
assert(fence->count <= fence->rank);
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, f->id);
- pipe_mutex_lock(f->mutex);
+ mtx_lock(&f->mutex);
assert(f->issued);
while (f->count < f->rank) {
cnd_wait(&f->signalled, &f->mutex);
{
struct cmd_bin *bin = NULL;
- pipe_mutex_lock(scene->mutex);
+ mtx_lock(&scene->mutex);
if (scene->curr_x < 0) {
/* first bin */
if (setup->last_fence)
setup->last_fence->issued = TRUE;
- pipe_mutex_lock(screen->rast_mutex);
+ mtx_lock(&screen->rast_mutex);
/* FIXME: We enqueue the scene then wait on the rasterizer to finish.
* This means we never actually run any vertex stuff in parallel to
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
- pipe_mutex_lock(blitter->mutex);
+ mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nv50->base.pipe, mode, ptarg);
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
- pipe_mutex_lock(blitter->mutex);
+ mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nvc0_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);
/* Pair the resource with the CMASK to avoid other resources
* accessing it. */
if (!r300->screen->cmask_resource) {
- pipe_mutex_lock(r300->screen->cmask_mutex);
+ mtx_lock(&r300->screen->cmask_mutex);
/* Double checking (first unlocked, then locked). */
if (!r300->screen->cmask_resource) {
/* Don't reference this, so that the texture can be
struct r300_resource* tex = (struct r300_resource*)texture;
if (tex->tex.cmask_dwords) {
- pipe_mutex_lock(rscreen->cmask_mutex);
+ mtx_lock(&rscreen->cmask_mutex);
if (texture == rscreen->cmask_resource) {
rscreen->cmask_resource = NULL;
}
{
/* Start the thread if needed. */
if (!rscreen->gpu_load_thread) {
- pipe_mutex_lock(rscreen->gpu_load_mutex);
+ mtx_lock(&rscreen->gpu_load_mutex);
/* Check again inside the mutex. */
if (!rscreen->gpu_load_thread)
rscreen->gpu_load_thread =
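/* Minimal sketch of the double-checked start used here and for the
 * cmask pairing above: an unlocked fast-path check skips the mutex once
 * the resource exists, and the re-check under the lock ensures creation
 * happens at most once. All names below are hypothetical. */
if (!screen->worker_thread) {                /* fast path, no lock */
   mtx_lock(&screen->worker_mutex);
   if (!screen->worker_thread)               /* re-check under the lock */
      screen->worker_thread = example_spawn_worker(screen);
   pipe_mutex_unlock(screen->worker_mutex);
}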
{
struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
pipe_mutex_unlock(rscreen->aux_context_lock);
struct pipe_context *ctx = &rctx->b;
if (ctx == rscreen->aux_context)
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
ctx->flush_resource(ctx, &rtex->resource.b.b);
ctx->flush(ctx, NULL, 0);
return false;
if (&rctx->b == rscreen->aux_context)
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
/* Decompress DCC. */
rctx->decompress_dcc(&rctx->b, rtex);
{
struct si_shader_part *result;
- pipe_mutex_lock(sscreen->shader_parts_mutex);
+ mtx_lock(&sscreen->shader_parts_mutex);
/* Find existing. */
for (result = *list; result; result = result->next) {
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
- pipe_mutex_lock(sel->mutex);
+ mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
}
if (tgsi_binary) {
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
pipe_mutex_unlock(sscreen->shader_cache_mutex);
rbug_screen_remove_from_list(rb_screen, contexts, rb_pipe);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->destroy(pipe);
rb_pipe->pipe = NULL;
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->draw_mutex);
+ mtx_lock(&rb_pipe->draw_mutex);
rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_BEFORE);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
/* XXX loop over PIPE_SHADER_x here */
if (!(rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] && rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT]->disabled) &&
!(rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] && rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY]->disabled) &&
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_query *query;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
query = pipe->create_query(pipe,
query_type,
index);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->destroy_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->begin_query(pipe, query);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
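/* Hedged note: the rbug wrappers in this file all follow the shape
 * above, taking rb_pipe->call_mutex around the wrapped pipe_context
 * call so the rbug debugger can safely pause and inspect the context
 * between calls; draw entry points additionally take draw_mutex first,
 * as the draw_vbo hunk earlier shows. */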
struct pipe_context *pipe = rb_pipe->pipe;
bool ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->end_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->get_query_result(pipe,
query,
wait,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_active_query_state(pipe, enable);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_sampler_states(pipe, shader, start, count, samplers);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_fs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *fs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
fs = rbug_shader_unwrap(_fs);
rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] = rbug_shader(_fs);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_fs);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_vs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *vs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
vs = rbug_shader_unwrap(_vs);
rb_pipe->curr.shader[PIPE_SHADER_VERTEX] = rbug_shader(_vs);
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_gs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_context *pipe = rb_pipe->pipe;
void *gs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
gs = rbug_shader_unwrap(_gs);
rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] = rbug_shader(_gs);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_gs);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_vertex_elements_state(pipe,
num_elements,
vertex_elements);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_blend_color(pipe,
blend_color);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_stencil_ref(pipe,
stencil_ref);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_clip_state(pipe,
clip);
pipe_mutex_unlock(rb_pipe->call_mutex);
cb.buffer = rbug_resource_unwrap(_cb->buffer);
}
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_constant_buffer(pipe,
shader,
index,
unsigned i;
/* must protect curr status */
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.nr_cbufs = 0;
memset(rb_pipe->curr.cbufs, 0, sizeof(rb_pipe->curr.cbufs));
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_polygon_stipple(pipe,
poly_stipple);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_scissor_states(pipe, start_slot, num_scissors, scissor);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_viewport_states(pipe, start_slot, num_viewports, viewport);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
assert(start == 0); /* XXX fix */
/* must protect curr status */
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.num_views[shader] = 0;
memset(rb_pipe->curr.views[shader], 0, sizeof(rb_pipe->curr.views[shader]));
struct pipe_vertex_buffer *buffers = NULL;
unsigned i;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
if (num_buffers && _buffers) {
memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
ib = &unwrapped_ib;
}
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_index_buffer(pipe, ib);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_sample_mask(pipe, sample_mask);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_resource *res = rbug_resource_unwrap(_res);
struct pipe_stream_output_target *target;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
target = pipe->create_stream_output_target(pipe, res, buffer_offset,
buffer_size);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->stream_output_target_destroy(pipe, target);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_stream_output_targets(pipe, num_targets, targets, offsets);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_resource *dst = rb_resource_dst->resource;
struct pipe_resource *src = rb_resource_src->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->resource_copy_region(pipe,
dst,
dst_level,
blit_info.dst.resource = dst;
blit_info.src.resource = src;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->blit(pipe, &blit_info);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *res = rb_resource_res->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->flush_resource(pipe, res);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear(pipe,
buffers,
color,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear_render_target(pipe,
dst,
color,
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear_depth_stencil(pipe,
dst,
clear_flags,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->flush(pipe, fence, flags);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_resource *resource = rb_resource->resource;
struct pipe_sampler_view *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_sampler_view(pipe,
resource,
templ);
struct pipe_resource *resource = rb_resource->resource;
struct pipe_surface *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_surface(pipe,
resource,
surf_tmpl);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_surface *rb_surface = rbug_surface(_surface);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_surface_destroy(rb_pipe,
rb_surface);
pipe_mutex_unlock(rb_pipe->call_mutex);
struct pipe_transfer *result;
void *map;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
map = context->transfer_map(context,
resource,
level,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->transfer_flush_region(context,
transfer,
box);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->transfer_unmap(context,
transfer);
rbug_transfer_destroy(rb_pipe,
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->buffer_subdata(context, resource, usage, offset, size, data);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->texture_subdata(context,
resource,
level,
rbug_texture_t *texs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
texs = MALLOC(rb_screen->num_resources * sizeof(rbug_texture_t));
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
struct pipe_resource *t;
unsigned num_layers;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gpti->texture == VOID2U64(tr_tex))
void *map;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gptr->texture == VOID2U64(tr_tex))
rbug_context_t *ctxs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
ctxs = MALLOC(rb_screen->num_contexts * sizeof(rbug_context_t));
foreach(ptr, &rb_screen->contexts) {
rb_context = container_of(ptr, struct rbug_context, list);
rbug_texture_t texs[PIPE_MAX_SHADER_SAMPLER_VIEWS];
unsigned i;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->draw_mutex);
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->draw_mutex);
+ mtx_lock(&rb_context->call_mutex);
for (i = 0; i < rb_context->curr.nr_cbufs; i++)
cbufs[i] = VOID2U64(rb_context->curr.cbufs[i]);
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, block->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
rb_context->draw_blocker |= block->block;
pipe_mutex_unlock(rb_context->draw_mutex);
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, step->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (step->step & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, unblock->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (unblock->unblock & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rule->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
rb_context->draw_rule.shader[PIPE_SHADER_VERTEX] = U642VOID(rule->vertex);
rb_context->draw_rule.shader[PIPE_SHADER_FRAGMENT] = U642VOID(rule->fragment);
rb_context->draw_rule.texture = U642VOID(rule->texture);
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, flush->context);
if (!rb_context) {
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->call_mutex);
rb_context->pipe->flush(rb_context->pipe, NULL, 0);
rbug_shader_t *shdrs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, list->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
shdrs = MALLOC(rb_context->num_shaders * sizeof(rbug_shader_t));
foreach(ptr, &rb_context->shaders) {
tr_shdr = container_of(ptr, struct rbug_shader, list);
unsigned original_len;
unsigned replaced_len;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, info->shader);
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, dis->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
struct pipe_context *pipe = NULL;
void *state;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rep->context);
if (!rb_context) {
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->call_mutex);
pipe = rb_context->pipe;
#define rbug_screen_add_to_list(scr, name, obj) \
do { \
- pipe_mutex_lock(scr->list_mutex); \
+ mtx_lock(&scr->list_mutex); \
insert_at_head(&scr->name, &obj->list); \
scr->num_##name++; \
pipe_mutex_unlock(scr->list_mutex); \
#define rbug_screen_remove_from_list(scr, name, obj) \
do { \
- pipe_mutex_lock(scr->list_mutex); \
+ mtx_lock(&scr->list_mutex); \
remove_from_list(&obj->list); \
scr->num_##name--; \
pipe_mutex_unlock(scr->list_mutex); \
assert(transfer->usage & PIPE_TRANSFER_WRITE);
assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
svga_buffer_add_range(sbuf, offset, offset + length);
pipe_mutex_unlock(ss->swc_mutex);
}
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
assert(sbuf->map.count);
if (sbuf->map.count) {
if (ret != PIPE_OK)
return ret;
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
assert(map);
assert(!retry);
/* First try the cache */
if (view) {
- pipe_mutex_lock(ss->tex_mutex);
+ mtx_lock(&ss->tex_mutex);
if (tex->cached_view &&
tex->cached_view->min_lod == min_lod &&
tex->cached_view->max_lod == max_lod) {
return sv;
}
- pipe_mutex_lock(ss->tex_mutex);
+ mtx_lock(&ss->tex_mutex);
svga_sampler_view_reference(&tex->cached_view, sv);
pipe_mutex_unlock(ss->tex_mutex);
bucket = svga_screen_cache_bucket(key);
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
curr = cache->bucket[bucket].next;
next = curr->next;
surf_size = surface_size(key);
*p_handle = NULL;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
/* this surface is too large to cache, just free it */
struct list_head *curr, *next;
unsigned bucket;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
/* Loop over entries in the invalidated list */
curr = cache->invalidated.next;
void trace_dump_call_lock(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
}
void trace_dump_call_unlock(void)
void trace_dumping_start(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dumping_start_locked();
pipe_mutex_unlock(call_mutex);
}
void trace_dumping_stop(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dumping_stop_locked();
pipe_mutex_unlock(call_mutex);
}
boolean trace_dumping_enabled(void)
{
boolean ret;
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
ret = trace_dumping_enabled_locked();
pipe_mutex_unlock(call_mutex);
return ret;
void trace_dump_call_begin(const char *klass, const char *method)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dump_call_begin_locked(klass, method);
}
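/* Note on the asymmetry above: trace_dump_call_begin() returns with
 * call_mutex still held; the matching trace_dump_call_end() (not shown
 * in this hunk) is assumed to release it, mirroring the explicit
 * trace_dump_call_lock()/trace_dump_call_unlock() pair earlier. */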
return NULL;
struct vc4_bo *bo = NULL;
- pipe_mutex_lock(cache->lock);
+ mtx_lock(&cache->lock);
if (!list_empty(&cache->size_list[page_index])) {
bo = LIST_ENTRY(struct vc4_bo, cache->size_list[page_index].next,
size_list);
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
- pipe_mutex_lock(screen->bo_cache.lock);
+ mtx_lock(&screen->bo_cache.lock);
vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
pipe_mutex_unlock(screen->bo_cache.lock);
}
static void
vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
{
- pipe_mutex_lock(cache->lock);
+ mtx_lock(&cache->lock);
list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
time_list) {
vc4_bo_remove_from_cache(cache, bo);
assert(size);
- pipe_mutex_lock(screen->bo_handles_mutex);
+ mtx_lock(&screen->bo_handles_mutex);
bo = util_hash_table_get(screen->bo_handles, (void*)(uintptr_t)handle);
if (bo) {
return -1;
}
- pipe_mutex_lock(bo->screen->bo_handles_mutex);
+ mtx_lock(&bo->screen->bo_handles_mutex);
bo->private = false;
util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(bo->screen->bo_handles_mutex);
vc4_bo_last_unreference(*bo);
} else {
screen = (*bo)->screen;
- pipe_mutex_lock(screen->bo_handles_mutex);
+ mtx_lock(&screen->bo_handles_mutex);
if (pipe_reference(&(*bo)->reference, NULL)) {
util_hash_table_remove(screen->bo_handles,
#if defined(RTLD_DEFAULT)
bool success;
- pipe_mutex_lock(screen->opencl_func_mutex);
+ mtx_lock(&screen->opencl_func_mutex);
if (dri2_is_opencl_interop_loaded_locked(screen)) {
pipe_mutex_unlock(screen->opencl_func_mutex);
return NULL;
}
- pipe_mutex_lock(init_mutex);
+ mtx_lock(&init_mutex);
/* Look for XMesaDisplay which corresponds to this display */
info = MesaExtInfo.head;
XMesaDisplay xmdpy = xmesa_init_display(dpy);
Status stat;
- pipe_mutex_lock(xmdpy->mutex);
+ mtx_lock(&xmdpy->mutex);
stat = get_drawable_size(dpy, b->ws.drawable, width, height);
pipe_mutex_unlock(xmdpy->mutex);
#if 0
struct stw_st_framebuffer *stwfb = stw_st_framebuffer(stfb);
- pipe_mutex_lock(stwfb->fb->mutex);
+ mtx_lock(&stwfb->fb->mutex);
struct pipe_resource* resource = textures[statt];
if (resource)
void
NineLockGlobalMutex()
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
}
void
UINT *pCertificateSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_GetCertificateSize(This, pCertificateSize);
pipe_mutex_unlock(d3dlock_global);
return r;
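/* Hedged note: every Lock*-prefixed entry point in this file repeats
 * the shape above, serializing on the single global d3dlock_global
 * around the unlocked Nine* worker. The generic shape, with
 * hypothetical names: */
static HRESULT NINE_WINAPI
LockExample9_DoWork( struct NineExample9 *This, UINT Arg )
{
    HRESULT r;
    mtx_lock(&d3dlock_global);
    r = NineExample9_DoWork(This, Arg);   /* real, unlocked implementation */
    pipe_mutex_unlock(d3dlock_global);
    return r;
}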
BYTE *ppCertificate )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_GetCertificate(This, CertifacteSize, ppCertificate);
pipe_mutex_unlock(d3dlock_global);
return r;
void *pData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_NegotiateKeyExchange(This, DataSize, pData);
pipe_mutex_unlock(d3dlock_global);
return r;
void *pOutput )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_Query(This, InputSize, pInput, OutputSize, pOutput);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DAUTHENTICATEDCHANNEL_CONFIGURE_OUTPUT *pOutput )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_Configure(This, InputSize, pInput, pOutput);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_SetPrivateData(This, refguid, pData, SizeOfData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetPrivateData(This, refguid, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
REFGUID refguid )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_FreePrivateData(This, refguid);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD PriorityNew )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_SetPriority(This, PriorityNew);
pipe_mutex_unlock(d3dlock_global);
return r;
LockResource9_GetPriority( struct NineResource9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_GetPriority(This);
pipe_mutex_unlock(d3dlock_global);
return r;
static void NINE_WINAPI
LockResource9_PreLoad( struct NineResource9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineResource9_PreLoad(This);
pipe_mutex_unlock(d3dlock_global);
}
LockResource9_GetType( struct NineResource9 *This )
{
D3DRESOURCETYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_GetType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD LODNew )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_SetLOD(This, LODNew);
pipe_mutex_unlock(d3dlock_global);
return r;
LockBaseTexture9_GetLOD( struct NineBaseTexture9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetLOD(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockBaseTexture9_GetLevelCount( struct NineBaseTexture9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetLevelCount(This);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DTEXTUREFILTERTYPE FilterType )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_SetAutoGenFilterType(This, FilterType);
pipe_mutex_unlock(d3dlock_global);
return r;
LockBaseTexture9_GetAutoGenFilterType( struct NineBaseTexture9 *This )
{
D3DTEXTUREFILTERTYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetAutoGenFilterType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
static void NINE_WINAPI
LockBaseTexture9_PreLoad( struct NineBaseTexture9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineBaseTexture9_PreLoad(This);
pipe_mutex_unlock(d3dlock_global);
}
static void NINE_WINAPI
LockBaseTexture9_GenerateMipSubLevels( struct NineBaseTexture9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineBaseTexture9_GenerateMipSubLevels(This);
pipe_mutex_unlock(d3dlock_global);
}
UINT *pCertificateSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetCertificateSize(This, pCertificateSize);
pipe_mutex_unlock(d3dlock_global);
return r;
BYTE *ppCertificate )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetCertificate(This, CertifacteSize, ppCertificate);
pipe_mutex_unlock(d3dlock_global);
return r;
void *pData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_NegotiateKeyExchange(This, DataSize, pData);
pipe_mutex_unlock(d3dlock_global);
return r;
void *pIV )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_EncryptionBlt(This, pSrcSurface, pDstSurface, DstSurfaceSize, pIV);
pipe_mutex_unlock(d3dlock_global);
return r;
void *pIV )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_DecryptionBlt(This, pSrcSurface, pDstSurface, SrcSurfaceSize, pEncryptedBlockInfo, pContentKey, pIV);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pSurfacePitch )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetSurfacePitch(This, pSrcSurface, pSurfacePitch);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT RandomNumberSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_StartSessionKeyRefresh(This, pRandomNumber, RandomNumberSize);
pipe_mutex_unlock(d3dlock_global);
return r;
LockCryptoSession9_FinishSessionKeyRefresh( struct NineCryptoSession9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_FinishSessionKeyRefresh(This);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT KeySize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetEncryptionBltKey(This, pReadbackKey, KeySize);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppCubeMapSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_GetCubeMapSurface(This, FaceType, Level, ppCubeMapSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_LockRect(This, FaceType, Level, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_UnlockRect(This, FaceType, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
const RECT *pDirtyRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_AddDirtyRect(This, FaceType, pDirtyRect);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_TestCooperativeLevel( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_TestCooperativeLevel(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_GetAvailableTextureMem( struct NineDevice9 *This )
{
UINT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetAvailableTextureMem(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_EvictManagedResources( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EvictManagedResources(This);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3D9 **ppD3D9 )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDirect3D(This, ppD3D9);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DCAPS9 *pCaps )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDeviceCaps(This, pCaps);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDISPLAYMODE *pMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDisplayMode(This, iSwapChain, pMode);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDEVICE_CREATION_PARAMETERS *pParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetCreationParameters(This, pParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pCursorBitmap )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetCursorProperties(This, XHotSpot, YHotSpot, pCursorBitmap);
pipe_mutex_unlock(d3dlock_global);
return r;
int Y,
DWORD Flags )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_SetCursorPosition(This, X, Y, Flags);
pipe_mutex_unlock(d3dlock_global);
}
BOOL bShow )
{
BOOL r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ShowCursor(This, bShow);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSwapChain9 **pSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateAdditionalSwapChain(This, pPresentationParameters, pSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSwapChain9 **pSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSwapChain(This, iSwapChain, pSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_GetNumberOfSwapChains( struct NineDevice9 *This )
{
UINT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetNumberOfSwapChains(This);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DPRESENT_PARAMETERS *pPresentationParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Reset(This, pPresentationParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
const RGNDATA *pDirtyRegion )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppBackBuffer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetBackBuffer(This, iSwapChain, iBackBuffer, Type, ppBackBuffer);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DRASTER_STATUS *pRasterStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRasterStatus(This, iSwapChain, pRasterStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
BOOL bEnableDialogs )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetDialogBoxMode(This, bEnableDialogs);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags,
const D3DGAMMARAMP *pRamp )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_SetGammaRamp(This, iSwapChain, Flags, pRamp);
pipe_mutex_unlock(d3dlock_global);
}
UINT iSwapChain,
D3DGAMMARAMP *pRamp )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_GetGammaRamp(This, iSwapChain, pRamp);
pipe_mutex_unlock(d3dlock_global);
}
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateTexture(This, Width, Height, Levels, Usage, Format, Pool, ppTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVolumeTexture(This, Width, Height, Depth, Levels, Usage, Format, Pool, ppVolumeTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateCubeTexture(This, EdgeLength, Levels, Usage, Format, Pool, ppCubeTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexBuffer(This, Length, Usage, FVF, Pool, ppVertexBuffer, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateIndexBuffer(This, Length, Usage, Format, Pool, ppIndexBuffer, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateRenderTarget(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateDepthStencilSurface(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
const POINT *pDestPoint )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_UpdateSurface(This, pSourceSurface, pSourceRect, pDestinationSurface, pDestPoint);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DBaseTexture9 *pDestinationTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_UpdateTexture(This, pSourceTexture, pDestinationTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderTargetData(This, pRenderTarget, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetFrontBufferData(This, iSwapChain, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DTEXTUREFILTERTYPE Filter )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_StretchRect(This, pSourceSurface, pSourceRect, pDestSurface, pDestRect, Filter);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DCOLOR color )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ColorFill(This, pSurface, pRect, color);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateOffscreenPlainSurface(This, Width, Height, Format, Pool, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pRenderTarget )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetRenderTarget(This, RenderTargetIndex, pRenderTarget);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppRenderTarget )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderTarget(This, RenderTargetIndex, ppRenderTarget);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pNewZStencil )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetDepthStencilSurface(This, pNewZStencil);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppZStencilSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDepthStencilSurface(This, ppZStencilSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_BeginScene( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_BeginScene(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_EndScene( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EndScene(This);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Stencil )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Clear(This, Count, pRects, Flags, Color, Z, Stencil);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_MultiplyTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DVIEWPORT9 *pViewport )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetViewport(This, pViewport);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DVIEWPORT9 *pViewport )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetViewport(This, pViewport);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DMATERIAL9 *pMaterial )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetMaterial(This, pMaterial);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DMATERIAL9 *pMaterial )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetMaterial(This, pMaterial);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DLIGHT9 *pLight )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetLight(This, Index, pLight);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DLIGHT9 *pLight )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetLight(This, Index, pLight);
pipe_mutex_unlock(d3dlock_global);
return r;
BOOL Enable )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_LightEnable(This, Index, Enable);
pipe_mutex_unlock(d3dlock_global);
return r;
BOOL *pEnable )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetLightEnable(This, Index, pEnable);
pipe_mutex_unlock(d3dlock_global);
return r;
const float *pPlane )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetClipPlane(This, Index, pPlane);
pipe_mutex_unlock(d3dlock_global);
return r;
float *pPlane )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetClipPlane(This, Index, pPlane);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetRenderState(This, State, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderState(This, State, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DStateBlock9 **ppSB )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateStateBlock(This, Type, ppSB);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_BeginStateBlock( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_BeginStateBlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DStateBlock9 **ppSB )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EndStateBlock(This, ppSB);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DCLIPSTATUS9 *pClipStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetClipStatus(This, pClipStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DCLIPSTATUS9 *pClipStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetClipStatus(This, pClipStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DBaseTexture9 **ppTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTexture(This, Stage, ppTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DBaseTexture9 *pTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTexture(This, Stage, pTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTextureStageState(This, Stage, Type, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTextureStageState(This, Stage, Type, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSamplerState(This, Sampler, Type, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetSamplerState(This, Sampler, Type, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pNumPasses )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ValidateDevice(This, pNumPasses);
pipe_mutex_unlock(d3dlock_global);
return r;
const PALETTEENTRY *pEntries )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPaletteEntries(This, PaletteNumber, pEntries);
pipe_mutex_unlock(d3dlock_global);
return r;
PALETTEENTRY *pEntries )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPaletteEntries(This, PaletteNumber, pEntries);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT PaletteNumber )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetCurrentTexturePalette(This, PaletteNumber);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *PaletteNumber )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetCurrentTexturePalette(This, PaletteNumber);
pipe_mutex_unlock(d3dlock_global);
return r;
const RECT *pRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetScissorRect(This, pRect);
pipe_mutex_unlock(d3dlock_global);
return r;
RECT *pRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetScissorRect(This, pRect);
pipe_mutex_unlock(d3dlock_global);
return r;
BOOL bSoftware )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetSoftwareVertexProcessing(This, bSoftware);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_GetSoftwareVertexProcessing( struct NineDevice9 *This )
{
BOOL r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSoftwareVertexProcessing(This);
pipe_mutex_unlock(d3dlock_global);
return r;
float nSegments )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetNPatchMode(This, nSegments);
pipe_mutex_unlock(d3dlock_global);
return r;
LockDevice9_GetNPatchMode( struct NineDevice9 *This )
{
float r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetNPatchMode(This);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT PrimitiveCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawPrimitive(This, PrimitiveType, StartVertex, PrimitiveCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT primCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawIndexedPrimitive(This, PrimitiveType, BaseVertexIndex, MinVertexIndex, NumVertices, startIndex, primCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT VertexStreamZeroStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawPrimitiveUP(This, PrimitiveType, PrimitiveCount, pVertexStreamZeroData, VertexStreamZeroStride);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT VertexStreamZeroStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawIndexedPrimitiveUP(This, PrimitiveType, MinVertexIndex, NumVertices, PrimitiveCount, pIndexData, IndexDataFormat, pVertexStreamZeroData, VertexStreamZeroStride);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ProcessVertices(This, SrcStartIndex, DestIndex, VertexCount, pDestBuffer, pVertexDecl, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexDeclaration9 **ppDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexDeclaration(This, pVertexElements, ppDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexDeclaration9 *pDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexDeclaration(This, pDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexDeclaration9 **ppDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexDeclaration(This, ppDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD FVF )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetFVF(This, FVF);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD *pFVF )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetFVF(This, pFVF);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexShader(This, pFunction, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexShader9 *pShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShader(This, pShader);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVertexShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShader(This, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Stride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetStreamSource(This, StreamNumber, pStreamData, OffsetInBytes, Stride);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetStreamSource(This, StreamNumber, ppStreamData, pOffsetInBytes, pStride);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Setting )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetStreamSourceFreq(This, StreamNumber, Setting);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pSetting )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetStreamSourceFreq(This, StreamNumber, pSetting);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DIndexBuffer9 *pIndexData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetIndices(This, pIndexData);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DIndexBuffer9 **ppIndexData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetIndices(This, ppIndexData);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DPixelShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreatePixelShader(This, pFunction, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DPixelShader9 *pShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShader(This, pShader);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DPixelShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShader(This, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DRECTPATCH_INFO *pRectPatchInfo )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawRectPatch(This, Handle, pNumSegs, pRectPatchInfo);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DTRIPATCH_INFO *pTriPatchInfo )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawTriPatch(This, Handle, pNumSegs, pTriPatchInfo);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Handle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DeletePatch(This, Handle);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DQuery9 **ppQuery )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateQuery(This, Type, ppQuery);
pipe_mutex_unlock(d3dlock_global);
return r;
float *columns )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetConvolutionMonoKernel(This, width, height, rows, columns);
pipe_mutex_unlock(d3dlock_global);
return r;
int Yoffset )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_ComposeRects(This, pSrc, pDst, pSrcRectDescs, NumRects, pDstRectDescs, Operation, Xoffset, Yoffset);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD dwFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_PresentEx(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
INT *pPriority )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetGPUThreadPriority(This, pPriority);
pipe_mutex_unlock(d3dlock_global);
return r;
INT Priority )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetGPUThreadPriority(This, Priority);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT iSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_WaitForVBlank(This, iSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT32 NumResources )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CheckResourceResidency(This, pResourceArray, NumResources);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT MaxLatency )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetMaximumFrameLatency(This, MaxLatency);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pMaxLatency )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetMaximumFrameLatency(This, pMaxLatency);
pipe_mutex_unlock(d3dlock_global);
return r;
HWND hDestinationWindow )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CheckDeviceState(This, hDestinationWindow);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateRenderTargetEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateOffscreenPlainSurfaceEx(This, Width, Height, Format, Pool, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateDepthStencilSurfaceEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDISPLAYMODEEX *pFullscreenDisplayMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_ResetEx(This, pPresentationParameters, pFullscreenDisplayMode);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDISPLAYROTATION *pRotation )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetDisplayModeEx(This, iSwapChain, pMode, pRotation);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DCONTENTPROTECTIONCAPS *pCaps )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_GetContentProtectionCaps(This, pCryptoType, pDecodeProfile, pCaps);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pChannelHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_CreateAuthenticatedChannel(This, ChannelType, ppAuthenticatedChannel, pChannelHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
HANDLE *pCryptoHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_CreateCryptoSession(This, pCryptoType, pDecodeProfile, ppCryptoSession, pCryptoHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
LockIndexBuffer9_Unlock( struct NineIndexBuffer9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_Unlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DINDEXBUFFER_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NinePixelShader9_GetFunction(This, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
LockQuery9_GetType( struct NineQuery9 *This )
{
D3DQUERYTYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockQuery9_GetDataSize( struct NineQuery9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetDataSize(This);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD dwIssueFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_Issue(This, dwIssueFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD dwGetDataFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetData(This, pData, dwSize, dwGetDataFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
LockStateBlock9_Capture( struct NineStateBlock9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineStateBlock9_Capture(This);
pipe_mutex_unlock(d3dlock_global);
return r;
LockStateBlock9_Apply( struct NineStateBlock9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineStateBlock9_Apply(This);
pipe_mutex_unlock(d3dlock_global);
return r;
void **ppContainer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetContainer(This, riid, ppContainer);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_LockRect(This, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
LockSurface9_UnlockRect( struct NineSurface9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_UnlockRect(This);
pipe_mutex_unlock(d3dlock_global);
return r;
HDC *phdc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetDC(This, phdc);
pipe_mutex_unlock(d3dlock_global);
return r;
HDC hdc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_ReleaseDC(This, hdc);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD dwFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetFrontBufferData(This, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppBackBuffer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetBackBuffer(This, iBackBuffer, Type, ppBackBuffer);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DRASTER_STATUS *pRasterStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetRasterStatus(This, pRasterStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDISPLAYMODE *pMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetDisplayMode(This, pMode);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DPRESENT_PARAMETERS *pPresentationParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetPresentParameters(This, pPresentationParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pLastPresentCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetLastPresentCount(This, pLastPresentCount);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DPRESENTSTATS *pPresentationStatistics )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetPresentStats(This, pPresentationStatistics);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DDISPLAYROTATION *pRotation )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetDisplayModeEx(This, pMode, pRotation);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DSurface9 **ppSurfaceLevel )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_GetSurfaceLevel(This, Level, ppSurfaceLevel);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_LockRect(This, Level, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_UnlockRect(This, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
const RECT *pDirtyRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_AddDirtyRect(This, pDirtyRect);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
LockVertexBuffer9_Unlock( struct NineVertexBuffer9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_Unlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DVERTEXBUFFER_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pNumElements )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexDeclaration9_GetDeclaration(This, pElement, pNumElements);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexShader9_GetFunction(This, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
void **ppContainer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_GetContainer(This, riid, ppContainer);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DVOLUME_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_LockBox(This, pLockedVolume, pBox, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
LockVolume9_UnlockBox( struct NineVolume9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_UnlockBox(This);
pipe_mutex_unlock(d3dlock_global);
return r;
D3DVOLUME_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
IDirect3DVolume9 **ppVolumeLevel )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_GetVolumeLevel(This, Level, ppVolumeLevel);
pipe_mutex_unlock(d3dlock_global);
return r;
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_LockBox(This, Level, pLockedVolume, pBox, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_UnlockBox(This, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
const D3DBOX *pDirtyBox )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_AddDirtyBox(This, pDirtyBox);
pipe_mutex_unlock(d3dlock_global);
return r;
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
/* wait for a full cmdbuf */
- pipe_mutex_lock(ctx->mutex_push);
+ mtx_lock(&ctx->mutex_push);
while (!cmdbuf->full)
{
DBG("waiting for full cmdbuf\n");
if (ctx->cur_instr == cmdbuf->num_instr) {
/* signal waiting producer */
- pipe_mutex_lock(ctx->mutex_pop);
+ mtx_lock(&ctx->mutex_pop);
DBG("freeing cmdbuf=%p\n", cmdbuf);
cmdbuf->full = 0;
cnd_signal(&ctx->event_pop);
return;
/* signal waiting worker */
- pipe_mutex_lock(ctx->mutex_push);
+ mtx_lock(&ctx->mutex_push);
cmdbuf->full = 1;
cnd_signal(&ctx->event_push);
pipe_mutex_unlock(ctx->mutex_push);
cmdbuf = &ctx->pool[ctx->head];
/* wait for an empty cmdbuf */
- pipe_mutex_lock(ctx->mutex_pop);
+ mtx_lock(&ctx->mutex_pop);
while (cmdbuf->full)
{
DBG("waiting for empty cmdbuf\n");
static void
nine_csmt_wait_processed(struct csmt_context *ctx)
{
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
while (!p_atomic_read(&ctx->processed)) {
cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
}
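
nine_csmt_wait_processed pairs an atomic flag with a condition variable: the waiter re-reads the flag in a loop around cnd_wait, and (as the hunks further down show) the signalling side sets the flag while holding mutex_processed before calling cnd_signal, so the wakeup cannot slip between the read and the wait. A sketch of that handshake under those assumptions, with invented names:

    #include <stdatomic.h>
    #include <threads.h>

    static mtx_t m;               /* assume mtx_init/cnd_init ran at startup */
    static cnd_t cv;
    static atomic_bool processed; /* stand-in for the p_atomic_* flag */

    static void wait_processed(void)
    {
        mtx_lock(&m);
        while (!atomic_load(&processed))
            cnd_wait(&cv, &m);
        mtx_unlock(&m);
    }

    static void signal_processed(void)
    {
        mtx_lock(&m);                     /* flag flips under the same mutex */
        atomic_store(&processed, true);
        cnd_signal(&cv);
        mtx_unlock(&m);
    }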
while (1) {
nine_queue_wait_flush(ctx->pool);
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_running);
/* Get instruction. NULL on empty cmdbuf. */
while (!p_atomic_read(&ctx->terminate) &&
/* decode */
if (instr->func(ctx->device, instr)) {
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
if (p_atomic_read(&ctx->toPause)) {
pipe_mutex_unlock(ctx->thread_running);
/* will wait here until the thread can be resumed */
- pipe_mutex_lock(ctx->thread_resume);
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_resume);
+ mtx_lock(&ctx->thread_running);
pipe_mutex_unlock(ctx->thread_resume);
}
}
pipe_mutex_unlock(ctx->thread_running);
if (p_atomic_read(&ctx->terminate)) {
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
if (nine_queue_no_flushed_work(ctx->pool))
return;
- pipe_mutex_lock(ctx->thread_resume);
+ mtx_lock(&ctx->thread_resume);
p_atomic_set(&ctx->toPause, TRUE);
/* Wait until the thread is paused */
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_running);
ctx->hasPaused = TRUE;
p_atomic_set(&ctx->toPause, FALSE);
}
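
The pause/resume protocol above uses mutexes themselves as gates rather than condition variables: the control thread holds thread_resume for as long as the worker should stay parked, and successfully acquiring thread_running doubles as proof that the worker has stopped. A simplified sketch of that scheme, with invented names and the hasPaused bookkeeping trimmed:

    #include <stdatomic.h>
    #include <threads.h>

    static mtx_t running;       /* held by the worker while it processes */
    static mtx_t resume_gate;   /* held by the controller while the worker is paused */
    static atomic_bool to_pause;

    static void worker_checkpoint(void)  /* precondition: this thread holds `running` */
    {
        if (atomic_load(&to_pause)) {
            mtx_unlock(&running);   /* lets the controller's mtx_lock(&running) succeed */
            mtx_lock(&resume_gate); /* blocks here until the controller resumes us */
            mtx_lock(&running);
            mtx_unlock(&resume_gate);
        }
    }

    static void controller_pause(void)
    {
        mtx_lock(&resume_gate);     /* close the gate first */
        atomic_store(&to_pause, true);
        mtx_lock(&running);         /* returns only once the worker has parked */
        atomic_store(&to_pause, false);
    }

    static void controller_resume(void)
    {
        mtx_unlock(&running);
        mtx_unlock(&resume_gate);   /* open the gate; the worker re-acquires both */
    }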
struct vl_screen *omx_get_screen(void)
{
static bool first_time = true;
- pipe_mutex_lock(omx_lock);
+ mtx_lock(&omx_lock);
if (!omx_screen) {
if (first_time) {
void omx_put_screen(void)
{
- pipe_mutex_lock(omx_lock);
+ mtx_lock(&omx_lock);
if ((--omx_usecount) == 0) {
omx_screen->destroy(omx_screen);
omx_screen = NULL;
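
omx_get_screen/omx_put_screen implement a mutex-guarded, reference-counted singleton: the screen is created lazily by the first caller and destroyed when the last user releases it. A minimal sketch of the idiom with invented names; create/destroy stand in for the real vl_screen setup and teardown:

    #include <stddef.h>
    #include <threads.h>

    static mtx_t lock;            /* assume mtx_init ran at startup */
    static void *screen;          /* lazily created shared object */
    static int usecount;

    static void *get_screen(void *(*create)(void))
    {
        mtx_lock(&lock);
        if (!screen)
            screen = create();    /* first caller creates it */
        if (screen)
            usecount++;
        mtx_unlock(&lock);
        return screen;
    }

    static void put_screen(void (*destroy)(void *))
    {
        mtx_lock(&lock);
        if (--usecount == 0) {    /* last user tears it down */
            destroy(screen);
            screen = NULL;
        }
        mtx_unlock(&lock);
    }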
memcpy(buf->data, data, size * num_elements);
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*buf_id = handle_table_add(drv->htab, buf);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
if (!pbuff)
return VA_STATUS_ERROR_INVALID_PARAMETER;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
drv = VL_VA_DRIVER(ctx);
screen = VL_VA_PSCREEN(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
case VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME: {
struct winsys_handle whandle;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
drv->pipe->flush(drv->pipe, NULL, 0);
memset(&whandle, 0, sizeof(whandle));
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420 | VA_RT_FORMAT_RGB32;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
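
Nearly every vlVa* entry point above repeats one shape: lock drv->mutex, translate the caller's integer ID through the handle table (or add a new object to it), unlock, then validate the result and return a *_ERROR_INVALID_* status on failure. A toy sketch of that lookup discipline; the fixed-size array is an invented stand-in for the real handle_table:

    #include <stddef.h>
    #include <threads.h>

    #define MAX_HANDLES 64

    static mtx_t drv_mutex;               /* assume mtx_init ran at startup */
    static void *htab[MAX_HANDLES];       /* toy stand-in for handle_table */

    static void *lookup(unsigned id)      /* NULL result maps to an INVALID status */
    {
        void *obj = NULL;
        mtx_lock(&drv_mutex);
        if (id < MAX_HANDLES)
            obj = htab[id];
        mtx_unlock(&drv_mutex);
        return obj;
    }

    static unsigned add(void *obj)        /* returns 0 when the table is full */
    {
        unsigned id;
        mtx_lock(&drv_mutex);
        for (id = 1; id < MAX_HANDLES; id++) {
            if (!htab[id]) {
                htab[id] = obj;
                break;
            }
        }
        mtx_unlock(&drv_mutex);
        return id < MAX_HANDLES ? id : 0;
    }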
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
if (!config)
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*context_id = handle_table_add(drv->htab, context);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
img = CALLOC(1, sizeof(VAImage));
if (!img)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
img_buf->type = VAImageBufferType;
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
pipe_mutex_unlock(drv->mutex);
if (!context)
return VA_STATUS_SUCCESS;
}
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, context->target_id);
context->mpeg4.frame_num++;
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; ++i) {
vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
if (!surf) {
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, render_target);
if (!surf || !surf->buffer) {
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface_id);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
memset(surfaces, VA_INVALID_ID, num_surfaces * sizeof(VASurfaceID));
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; i++) {
vlVaSurface *surf = CALLOC(1, sizeof(vlVaSurface));
if (!surf)
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
res_tmpl.usage = frequently_accessed ? PIPE_USAGE_DYNAMIC : PIPE_USAGE_DEFAULT;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl)) {
ret = VDP_STATUS_RESOURCES;
*surface = vlAddDataHTAB(vlsurface);
if (*surface == 0) {
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
ret = VDP_STATUS_ERROR;
goto err_sampler;
}
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
pipe = vlsurface->device->context;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
pipe = dev->context;
screen = dev->vscreen->pscreen;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
supported = screen->get_video_param
(
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vldecoder->mutex);
+ mtx_lock(&vldecoder->mutex);
vldecoder->decoder->destroy(vldecoder->decoder);
pipe_mutex_unlock(vldecoder->mutex);
mtx_destroy(&vldecoder->mutex);
dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
!buffer_support[vlsurf->video_buffer->interlaced]) {
- pipe_mutex_lock(vlsurf->device->mutex);
+ mtx_lock(&vlsurf->device->mutex);
/* destroy the old one */
if (vlsurf->video_buffer)
if (ret != VDP_STATUS_OK)
return ret;
- pipe_mutex_lock(vldecoder->mutex);
+ mtx_lock(&vldecoder->mutex);
dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
dec->end_frame(dec, vlsurf->video_buffer, &desc.base);
/* Make sure handle table handles match VDPAU handles. */
assert(sizeof(unsigned) <= sizeof(vlHandle));
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (!htab)
htab = handle_table_create();
ret = htab != NULL;
void vlDestroyHTAB(void)
{
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab && !handle_table_get_first_handle(htab)) {
handle_table_destroy(htab);
htab = NULL;
vlHandle handle = 0;
assert(data);
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
handle = handle_table_add(htab, data);
pipe_mutex_unlock(htab_lock);
void *data = NULL;
assert(handle);
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
data = handle_table_get(htab, handle);
pipe_mutex_unlock(htab_lock);
void vlRemoveDataHTAB(vlHandle handle)
{
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
handle_table_remove(htab, handle);
pipe_mutex_unlock(htab_lock);
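
The VDPAU handle table adds a lifecycle twist to the same idiom: it is created lazily under htab_lock on first use, and vlDestroyHTAB tears it down only once no handles remain. A sketch of that lazy-init/empty-teardown pattern, with a toy table standing in for the handle_table_* helpers:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <threads.h>

    static mtx_t htab_lock;       /* assume mtx_init ran at startup */
    static void **htab;           /* NULL until first use */
    static unsigned htab_used;    /* live handle count */

    static bool init_htab(void)
    {
        bool ok;
        mtx_lock(&htab_lock);
        if (!htab)
            htab = calloc(64, sizeof(*htab));
        ok = htab != NULL;
        mtx_unlock(&htab_lock);
        return ok;
    }

    static void destroy_htab(void)
    {
        mtx_lock(&htab_lock);
        if (htab && !htab_used) { /* tear down only when empty */
            free(htab);
            htab = NULL;
        }
        mtx_unlock(&htab_lock);
    }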
DeviceReference(&vmixer->device, dev);
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&vmixer->cstate, dev->context)) {
ret = VDP_STATUS_ERROR;
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
vlRemoveDataHTAB(mixer);
return VDP_STATUS_INVALID_HANDLE;
}
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
vl_compositor_clear_layers(&vmixer->cstate);
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < feature_count; ++i) {
switch (features[i]) {
/* they are valid, but we don't support them */
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
PIPE_BIND_SHARED | PIPE_BIND_SCANOUT;
res_tmpl.usage = PIPE_USAGE_DEFAULT;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl))
goto err_unlock;
pipe = vlsurface->device->context;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
pipe_surface_reference(&vlsurface->surface, NULL);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
if (!destination_data || !destination_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
res = vlsurface->sampler_view->texture;
box = RectToPipeBox(source_rect, res);
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
res_tmpl.usage = PIPE_USAGE_STAGING;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
if (!CheckSurfaceParams(context->screen, &res_tmpl))
goto error_resource;
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
memset(&vtmpl, 0, sizeof(vtmpl));
vtmpl.buffer_format = format;
vtmpl.chroma_format = FormatYCBCRToPipeChroma(source_ycbcr_format);
src_sv = src_vlsurface->sampler_view;
}
- pipe_mutex_lock(dst_vlsurface->device->mutex);
+ mtx_lock(&dst_vlsurface->device->mutex);
context = dst_vlsurface->device->context;
compositor = &dst_vlsurface->device->compositor;
compositor = &dst_vlsurface->device->compositor;
cstate = &dst_vlsurface->cstate;
- pipe_mutex_lock(dst_vlsurface->device->mutex);
+ mtx_lock(&dst_vlsurface->device->mutex);
blend = BlenderToPipe(context, blend_state);
if (!vlsurface || !vlsurface->surface)
return NULL;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
if (!vlsurface || !vlsurface->surface)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
memset(&whandle, 0, sizeof(struct winsys_handle));
DeviceReference(&pq->device, dev);
pq->drawable = pqt->drawable;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&pq->cstate, dev->context)) {
pipe_mutex_unlock(dev->mutex);
ret = VDP_STATUS_ERROR;
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_cleanup_state(&pq->cstate);
pipe_mutex_unlock(pq->device->mutex);
color.f[2] = background_color->blue;
color.f[3] = background_color->alpha;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_set_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_get_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
*current_time = pq->device->vscreen->get_timestamp(pq->device->vscreen,
(void *)pq->drawable);
pipe_mutex_unlock(pq->device->mutex);
cstate = &pq->cstate;
vscreen = pq->device->vscreen;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
if (vscreen->set_back_texture_from_output && surf->send_to_X)
vscreen->set_back_texture_from_output(vscreen, surf->surface->texture, clip_width, clip_height);
tex = vscreen->texture_from_drawable(vscreen, (void *)pq->drawable);
if (!surf)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
if (surf->fence) {
screen = pq->device->vscreen->pscreen;
screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
else
*status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;
} else {
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
screen = pq->device->vscreen->pscreen;
if (screen->fence_finish(screen, NULL, surf->fence, 0)) {
screen->fence_reference(screen, &surf->fence, NULL);
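
The presentation-queue hunks use the same fence hook two ways: waiting with PIPE_TIMEOUT_INFINITE blocks until the surface's last frame is done, while waiting with timeout 0 merely polls so the queue status can be reported without stalling. A sketch of both idioms against a stubbed screen vtable; the fence_finish shape mirrors the calls above, but the types here are toy stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    #define TIMEOUT_INFINITE UINT64_MAX   /* stand-in for PIPE_TIMEOUT_INFINITE */

    struct fence;                         /* opaque */
    struct screen {
        bool (*fence_finish)(struct screen *s, struct fence *f, uint64_t timeout_ns);
    };

    static void wait_surface_idle(struct screen *s, struct fence *f)
    {
        s->fence_finish(s, f, TIMEOUT_INFINITE);  /* blocking wait */
    }

    static bool surface_is_idle(struct screen *s, struct fence *f)
    {
        return s->fence_finish(s, f, 0);          /* non-blocking poll */
    }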
if (!pscreen)
return VDP_STATUS_RESOURCES;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
/* XXX: Current limits */
*is_supported = true;
if (!pscreen)
return VDP_STATUS_RESOURCES;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
switch(bits_ycbcr_format) {
case VDP_YCBCR_FORMAT_NV12:
return VDP_STATUS_OK;
}
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTED);
if (*is_supported) {
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_2D, 1,
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
if (!(min_value && max_value))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
screen = dev->vscreen->pscreen;
switch (parameter) {
case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:
DeviceReference(&p_surf->device, dev);
pipe = dev->context;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
memset(&p_surf->templat, 0, sizeof(p_surf->templat));
p_surf->templat.buffer_format = pipe->screen->get_video_param
(
if (!p_surf)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer)
p_surf->video_buffer->destroy(p_surf->video_buffer);
pipe_mutex_unlock(p_surf->device->mutex);
return VDP_STATUS_NO_IMPLEMENTATION;
}
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
sampler_views = vlsurface->video_buffer->get_sampler_view_planes(vlsurface->video_buffer);
if (!sampler_views) {
pipe_mutex_unlock(vlsurface->device->mutex);
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL ||
pformat != p_surf->video_buffer->buffer_format) {
if (!p_surf)
return NULL;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;
memset(result, 0, sizeof(*result));
result->handle = -1;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;
GalliumContext::Lock()
{
CALLED();
- pipe_mutex_lock(fMutex);
+ mtx_lock(&fMutex);
}
unsigned idle_fences;
bool buffer_idle;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
} else {
bool buffer_idle = true;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
fence_idle = true;
else
buffer_idle = false;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
/* Release an idle fence to avoid checking it again later, keeping in
* mind that the fence array may have been modified by other threads.
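The comment above names the pattern both the amdgpu and radeon buffer-wait paths rely on: never block while holding the fence lock. Instead, take a local reference, drop the lock, wait, retake the lock, and re-check that the array still starts with the fence that was waited on. A minimal sketch of that shape, with all names illustrative:

struct fence;                                /* opaque, hypothetical */
struct fence *fence_ref(struct fence *f);    /* hypothetical helpers */
void fence_unref(struct fence *f);
void fence_wait(struct fence *f);

struct fence_list {
   mtx_t lock;
   struct fence *fences[16];
   unsigned num_fences;
};

static void
wait_all(struct fence_list *fl)
{
   mtx_lock(&fl->lock);
   while (fl->num_fences) {
      struct fence *f = fence_ref(fl->fences[0]);

      /* Wait without holding the lock, so other threads may add or
       * remove fences in the meantime. */
      mtx_unlock(&fl->lock);
      fence_wait(f);
      mtx_lock(&fl->lock);

      /* The array may have changed while we slept: only pop the head
       * if it is still the fence we waited on. */
      if (fl->num_fences && fl->fences[0] == f) {
         fence_unref(fl->fences[0]);
         fl->num_fences--;
         memmove(&fl->fences[0], &fl->fences[1],
                 fl->num_fences * sizeof(fl->fences[0]));
      }
      fence_unref(f);
   }
   mtx_unlock(&fl->lock);
}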
assert(bo->bo && "must not be called for slab entries");
- pipe_mutex_lock(bo->ws->global_bo_list_lock);
+ mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
assert(bo->bo);
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
amdgpu_bo_handle *handles;
unsigned num = 0;
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
* that the order of fence dependency updates matches the order of
* submissions.
*/
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fence_dependencies(cs);
/* Swap command streams. "cst" is going to be submitted. */
* This must happen while the mutex is locked, so that
* amdgpu_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
destroy = pipe_reference(&ws->reference, NULL);
if (destroy && dev_tab)
drmFreeVersion(version);
/* Look up the winsys from the dev table. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
if (!dev_tab)
dev_tab = util_hash_table_create(hash_dev, compare_dev);
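This device table and the etnaviv, freedreno, nouveau, and virgl screen code below all implement the same idea: one global mutex guards both the lookup table and the per-screen reference count, so a screen can never be destroyed between being found in the table and being referenced. A hedged sketch, assuming Mesa's util_hash_table helpers and the hash_fd/compare_fd callbacks seen in this patch (the my_screen_* names are hypothetical):

static mtx_t screen_tab_mutex = _MTX_INITIALIZER_NP;
static struct util_hash_table *screen_tab;

struct my_screen {
   int refcnt;
   int fd;
};

static struct my_screen *
my_screen_get(int fd)
{
   struct my_screen *s;

   mtx_lock(&screen_tab_mutex);
   if (!screen_tab)
      screen_tab = util_hash_table_create(hash_fd, compare_fd);

   s = util_hash_table_get(screen_tab, (void *)(uintptr_t)fd);
   if (s) {
      s->refcnt++;          /* still under the mutex: cannot race destroy */
   } else {
      s = calloc(1, sizeof(*s));
      s->refcnt = 1;
      s->fd = fd;
      util_hash_table_set(screen_tab, (void *)(uintptr_t)fd, s);
   }
   mtx_unlock(&screen_tab_mutex);
   return s;
}

static void
my_screen_put(struct my_screen *s)
{
   /* The decrement must happen under the same mutex, so a concurrent
    * my_screen_get() cannot find the entry while it is being freed. */
   mtx_lock(&screen_tab_mutex);
   if (--s->refcnt == 0) {
      util_hash_table_remove(screen_tab, (void *)(uintptr_t)s->fd);
      free(s);
   }
   mtx_unlock(&screen_tab_mutex);
}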
struct etna_screen *screen = etna_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = etna_device_fd(screen->dev);
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
if (!etna_tab) {
etna_tab = util_hash_table_create(hash_fd, compare_fd);
if (!etna_tab)
struct fd_screen *screen = fd_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = fd_device_fd(screen->dev);
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
if (screen->refcount == -1)
return true;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
ret = --screen->refcount;
assert(ret >= 0);
if (ret == 0)
struct nouveau_screen *screen = NULL;
int ret, dupfd;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {
if (bo->handle)
return radeon_real_bo_is_busy(bo);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
busy = true;
if (bo->handle) {
radeon_real_bo_wait_idle(bo);
} else {
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
radeon_bo_reference(&bo->u.slab.fences[0], NULL);
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
*/
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
offset = hole->offset;
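The hole scan above is the allocation half of a first-fit virtual-address allocator: free ranges sit on a mutex-guarded list, an allocation consumes or shrinks the first hole that fits, and the release path in the next hunk merges freed ranges back. A sketch of the allocation side under those assumptions, with illustrative types:

struct va_hole {
   struct list_head list;
   uint64_t offset, size;
};

static uint64_t
va_alloc(struct va_allocator *a, uint64_t size)
{
   struct va_hole *hole, *n;
   uint64_t va = 0;

   mtx_lock(&a->mutex);
   LIST_FOR_EACH_ENTRY_SAFE(hole, n, &a->holes, list) {
      if (hole->size >= size) {
         va = hole->offset;
         hole->offset += size;
         hole->size -= size;
         if (!hole->size) {       /* hole fully consumed */
            LIST_DEL(&hole->list);
            free(hole);
         }
         break;
      }
   }
   if (!va) {                     /* no hole fit: grow the top of the range */
      va = a->top;
      a->top += size;
   }
   mtx_unlock(&a->mutex);
   return va;
}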
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
if ((va + size) == rws->va_offset) {
rws->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
memset(&args, 0, sizeof(args));
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(rws->bo_names,
}
/* Map the buffer. */
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
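The map_mutex logic above is a refcounted map cache: the first map establishes the CPU pointer, later maps only bump a counter, and only the last unmap actually unmaps. A self-contained sketch of that pattern, with plain mmap standing in for the winsys-specific mapping ioctl:

#include <assert.h>
#include <sys/mman.h>

struct bo {
   mtx_t map_mutex;
   void *ptr;           /* cached CPU mapping, NULL if unmapped */
   unsigned map_count;  /* callers currently holding the mapping */
   size_t size;
   off_t map_offset;
   int fd;
};

static void *
bo_map(struct bo *bo)
{
   mtx_lock(&bo->map_mutex);
   if (!bo->ptr) {
      void *p = mmap(NULL, bo->size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, bo->fd, bo->map_offset);
      if (p == MAP_FAILED) {
         mtx_unlock(&bo->map_mutex);
         return NULL;
      }
      bo->ptr = p;
   }
   bo->map_count++;
   mtx_unlock(&bo->map_mutex);
   return bo->ptr;
}

static void
bo_unmap(struct bo *bo)
{
   mtx_lock(&bo->map_mutex);
   assert(bo->map_count > 0);
   if (--bo->map_count == 0) {  /* last user: really unmap */
      munmap(bo->ptr, bo->size);
      bo->ptr = NULL;
   }
   mtx_unlock(&bo->map_mutex);
}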
if (!bo->handle)
bo = bo->u.slab.real;
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
pipe_mutex_unlock(bo->u.real.map_mutex);
return; /* it's not been mapped */
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
bo->u.real.use_reusable_pool = true;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
assert(args.handle != 0);
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* First check if there already is an existing bo for the handle. */
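The comment above describes why the handle table exists: importing the same flink name twice must find the existing bo rather than create a second one that would close the same kernel handle twice. A hedged sketch of the lookup half, reusing the document's util_hash_table helpers with otherwise illustrative names:

struct winsys {
   mtx_t bo_handles_mutex;
   struct util_hash_table *bo_names;   /* flink name -> bo */
};

struct bo {
   int refcnt;
   uint32_t flink_name;
};

static struct bo *
import_by_name(struct winsys *ws, uint32_t name)
{
   struct bo *bo;

   mtx_lock(&ws->bo_handles_mutex);
   bo = util_hash_table_get(ws->bo_names, (void *)(uintptr_t)name);
   if (bo) {
      /* Reference it while the table lock still protects it from a
       * concurrent destroy, then reuse the existing kernel handle. */
      bo->refcnt++;
      mtx_unlock(&ws->bo_handles_mutex);
      return bo;
   }

   /* Otherwise open the name, build a new bo, and insert it into the
    * table before dropping the mutex, so a second import of the same
    * name finds this bo instead of opening a duplicate handle. */
   mtx_unlock(&ws->bo_handles_mutex);
   return NULL;   /* creation path elided in this sketch */
}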
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
bo->flink_name = flink.name;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
}
if (pfence)
radeon_fence_reference(pfence, fence);
- pipe_mutex_lock(cs->ws->bo_fence_lock);
+ mtx_lock(&cs->ws->bo_fence_lock);
for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
p_atomic_inc(&bo->num_active_ioctls);
memset(&info, 0, sizeof(info));
- pipe_mutex_lock(*mutex);
+ mtx_lock(mutex);
/* Early exit if we are sure the request will fail. */
if (enable) {
* This must happen while the mutex is locked, so that
* radeon_drm_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
destroy = pipe_reference(&rws->reference, NULL);
if (destroy && fd_tab)
{
struct radeon_drm_winsys *ws;
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if (fenced_buf->mapcount) {
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if (!vl) {
/* Invalidate. */
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->buffer);
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/*
* Try to create GPU storage without stalling,
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
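For completeness, the flush loop above in sketch form: it polls rather than sleeping on a condition variable, so it must drop the mutex and yield between passes to let the threads that retire fences run. Names are illustrative; sched_yield comes from <sched.h> on the platforms guarded above:

struct mgr {
   mtx_t mutex;
   unsigned num_fenced;
};

bool check_signalled_locked(struct mgr *m);   /* hypothetical reaper */

static void
flush_all(struct mgr *m)
{
   mtx_lock(&m->mutex);
   while (m->num_fenced) {
      /* Poll with the mutex dropped so the threads that signal
       * fences can take it and make progress. */
      mtx_unlock(&m->mutex);
      sched_yield();
      mtx_lock(&m->mutex);
      while (check_signalled_locked(m))
         ;   /* reap every fence that completed since the last pass */
   }
   mtx_unlock(&m->mutex);
}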
* Make sure backup buffer ends up fenced.
*/
- pipe_mutex_lock(vsurf->mutex);
+ mtx_lock(&vsurf->mutex);
assert(vsurf->buf != NULL);
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
{
struct vmw_fence *fence, *n;
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
pipe_mutex_unlock(ops->mutex);
return;
ops = vmw_fence_ops(fence_ops);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (!has_emitted) {
emitted = ops->last_emitted;
fence->mask = mask;
fence->seqno = seqno;
p_atomic_set(&fence->signalled, 0);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
p_atomic_set(&fence->signalled, 1);
vmw_ioctl_fence_unref(vws, vfence->handle);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
pipe_mutex_unlock(ops->mutex);
*retry = FALSE;
assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (vsrf->mapcount) {
/*
boolean *rebind)
{
struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (--vsrf->mapcount == 0) {
*rebind = vsrf->rebind;
vsrf->rebind = FALSE;
struct drm_gem_close args;
if (res->flinked) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
if (res->bo_handle) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
pipe_mutex_unlock(qdws->bo_handles_mutex);
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
curr = qdws->delayed.next;
next = curr->next;
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(qdws, old);
} else {
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
virgl_cache_list_check_free(qdws);
old->start = os_time_get();
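The virgl resource cache above avoids freeing buffers eagerly: a released resource is timestamped with os_time_get() and parked on the mutex-guarded delayed list, and a later sweep frees entries whose deadline has passed. A hedged sketch of both halves with illustrative names; the one-second lifetime is an arbitrary example, not a value taken from the code:

static void
cache_release(struct cache *c, struct res *r)
{
   mtx_lock(&c->mutex);
   r->start = os_time_get();           /* microseconds */
   r->end = r->start + 1000000;        /* keep cached for ~1 second */
   LIST_ADDTAIL(&r->list, &c->delayed);
   mtx_unlock(&c->mutex);
}

static void
cache_expire(struct cache *c, int64_t now)
{
   struct res *r, *tmp;

   mtx_lock(&c->mutex);
   LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &c->delayed, list) {
      if (now < r->end)
         break;                        /* list is in insertion order */
      LIST_DEL(&r->list);
      res_destroy(r);                  /* hypothetical real-free helper */
   }
   mtx_unlock(&c->mutex);
}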
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
res = NULL;
curr = qdws->delayed.next;
return NULL;
}
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
res->flinked = TRUE;
res->flink = flink.name;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
struct virgl_screen *screen = virgl_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = virgl_drm_winsys(screen->vws)->fd;
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
curr = vtws->delayed.next;
next = curr->next;
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(vtws, old);
} else {
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
virgl_cache_list_check_free(vtws);
old->start = os_time_get();
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
res = NULL;
curr = vtws->delayed.next;