return 0;
}
-/* pipe_mutex
- */
-typedef mtx_t pipe_mutex;
-
#define pipe_static_mutex(mutex) \
- static pipe_mutex mutex = _MTX_INITIALIZER_NP
+ static mtx_t mutex = _MTX_INITIALIZER_NP
#define pipe_mutex_init(mutex) \
(void) mtx_init(&(mutex), mtx_plain)
__pipe_mutex_assert_locked(&(mutex))
static inline void
-__pipe_mutex_assert_locked(pipe_mutex *mutex)
+__pipe_mutex_assert_locked(mtx_t *mutex)
{
#ifdef DEBUG
/* NOTE: this would not work for recursive mutexes, but
- * pipe_mutex doesn't support those
+ * the wrappers here only create mtx_plain (non-recursive) mutexes
*/
int ret = mtx_trylock(mutex);
assert(ret == thrd_busy);
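
/*
 * For reference, the debug check above uses only standard C11 <threads.h>
 * calls.  A minimal, self-contained sketch of the same idea follows; the
 * helper and function names are illustrative, not part of os_thread.h.
 * mtx_trylock() on a free mutex returns thrd_success, so observing
 * thrd_busy is what proves the lock is already held.  As the NOTE above
 * says, this only works for non-recursive (mtx_plain) mutexes.
 */

#include <assert.h>
#include <threads.h>

static inline void
sketch_assert_locked(mtx_t *mutex)
{
#ifdef DEBUG
   int ret = mtx_trylock(mutex);
   assert(ret == thrd_busy);
   if (ret == thrd_success)
      mtx_unlock(mutex);   /* if asserts are compiled out, undo the lock */
#else
   (void) mutex;
#endif
}

static void
sketch_mutex_usage(void)
{
   mtx_t m;
   (void) mtx_init(&m, mtx_plain);   /* plain == non-recursive */
   mtx_lock(&m);
   sketch_assert_locked(&m);
   mtx_unlock(&m);
   mtx_destroy(&m);
}
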
unsigned count;
unsigned waiters;
uint64_t sequence;
- pipe_mutex mutex;
+ mtx_t mutex;
pipe_condvar condvar;
} pipe_barrier;
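
/*
 * The barrier above pairs a mutex with a condition variable and a
 * generation counter ("sequence") so the object can be reused.  A hedged
 * sketch of how such a barrier can be waited on, building on the struct
 * just above, <threads.h>, and the assumption that pipe_condvar wraps
 * cnd_t (the function name is illustrative, not necessarily the
 * pipe_barrier implementation):
 */

static void
sketch_barrier_wait(pipe_barrier *barrier)
{
   mtx_lock(&barrier->mutex);
   if (++barrier->waiters == barrier->count) {
      /* Last thread to arrive: start a new generation and wake everyone. */
      barrier->waiters = 0;
      barrier->sequence++;
      cnd_broadcast(&barrier->condvar);
   } else {
      uint64_t sequence = barrier->sequence;
      /* Sleep until the generation counter advances; re-checking it
       * guards against spurious wakeups. */
      do {
         cnd_wait(&barrier->condvar, &barrier->mutex);
      } while (sequence == barrier->sequence);
   }
   mtx_unlock(&barrier->mutex);
}
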
typedef struct
{
- pipe_mutex mutex;
+ mtx_t mutex;
pipe_condvar cond;
int counter;
} pipe_semaphore;
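
/*
 * Likewise, the semaphore above is the classic counter-plus-condvar
 * construction.  A sketch of wait ("P") and signal ("V"), under the same
 * assumption that pipe_condvar wraps cnd_t (function names are
 * illustrative):
 */

static void
sketch_semaphore_signal(pipe_semaphore *sema)
{
   mtx_lock(&sema->mutex);
   sema->counter++;
   cnd_signal(&sema->cond);            /* wake one waiter, if any */
   mtx_unlock(&sema->mutex);
}

static void
sketch_semaphore_wait(pipe_semaphore *sema)
{
   mtx_lock(&sema->mutex);
   while (sema->counter <= 0)          /* loop guards against spurious wakeups */
      cnd_wait(&sema->cond, &sema->mutex);
   sema->counter--;
   mtx_unlock(&sema->mutex);
}
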
/**
* Following members are mutable and protected by this mutex.
*/
- pipe_mutex mutex;
+ mtx_t mutex;
/**
* Fenced buffer list.
struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];
- pipe_mutex mutex;
+ mtx_t mutex;
unsigned map_count;
struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];
pb_size underflow_size;
pb_size overflow_size;
- pipe_mutex mutex;
+ mtx_t mutex;
struct list_head list;
};
{
struct pb_manager base;
- pipe_mutex mutex;
+ mtx_t mutex;
pb_size size;
struct mem_block *heap;
{
struct pb_manager base;
- pipe_mutex mutex;
+ mtx_t mutex;
pb_size bufSize;
pb_size bufAlign;
*/
struct list_head slabs;
- pipe_mutex mutex;
+ mtx_t mutex;
};
*/
struct list_head buckets[4];
- pipe_mutex mutex;
+ mtx_t mutex;
uint64_t cache_size;
uint64_t max_cache_size;
unsigned usecs;
*/
struct pb_slabs
{
- pipe_mutex mutex;
+ mtx_t mutex;
unsigned min_order;
unsigned num_orders;
struct debug_flush_buf {
/* Atomic */
struct pipe_reference reference; /* Must be the first member. */
- pipe_mutex mutex;
+ mtx_t mutex;
/* Immutable */
boolean supports_unsync;
unsigned bt_depth;
* Put this into your job structure.
*/
struct util_queue_fence {
- pipe_mutex mutex;
+ mtx_t mutex;
pipe_condvar cond;
int signalled;
};
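
/*
 * The fence above is a one-shot "signalled" flag guarded by the mutex and
 * broadcast over the condition variable.  A hedged sketch of the usual
 * wait/signal pair, again assuming pipe_condvar wraps cnd_t (the helper
 * names are illustrative, not the util_queue API):
 */

static void
sketch_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);  /* atomically unlocks and re-locks */
   mtx_unlock(&fence->mutex);
}

static void
sketch_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = 1;
   cnd_broadcast(&fence->cond);               /* wake every waiter */
   mtx_unlock(&fence->mutex);
}
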
/* Put this into your context. */
struct util_queue {
const char *name;
- pipe_mutex lock;
+ mtx_t lock;
pipe_condvar has_queued_cond;
pipe_condvar has_space_cond;
pipe_thread *threads;
unsigned end; /* exclusive */
/* for the range to be consistent with multiple contexts: */
- pipe_mutex write_mutex;
+ mtx_t write_mutex;
};
unsigned head;
unsigned tail;
pipe_condvar change;
- pipe_mutex mutex;
+ mtx_t mutex;
};
* the thread dumps the record of the oldest unsignalled fence.
*/
pipe_thread thread;
- pipe_mutex mutex;
+ mtx_t mutex;
int kill_thread;
struct pipe_resource *fence;
struct pipe_transfer *fence_transfer;
struct fd_screen {
struct pipe_screen base;
- pipe_mutex lock;
+ mtx_t lock;
/* it would be tempting to use pipe_reference here, but that
* really doesn't work well if it isn't the first member of
struct pipe_reference reference;
unsigned id;
- pipe_mutex mutex;
+ mtx_t mutex;
pipe_condvar signalled;
boolean issued;
unsigned tiles_x, tiles_y;
int curr_x, curr_y; /**< for iterating over bins */
- pipe_mutex mutex;
+ mtx_t mutex;
struct cmd_bin tile[TILES_X][TILES_Y];
struct data_block_list data;
unsigned timestamp;
struct lp_rasterizer *rast;
- pipe_mutex rast_mutex;
+ mtx_t rast_mutex;
};
struct nv50_tsc_entry sampler[2]; /* nearest, bilinear */
- pipe_mutex mutex;
+ mtx_t mutex;
};
struct nv50_blitctx
struct nv50_tsc_entry sampler[2]; /* nearest, bilinear */
- pipe_mutex mutex;
+ mtx_t mutex;
struct nvc0_screen *screen;
};
/* The MSAA texture with CMASK access; */
struct pipe_resource *cmask_resource;
- pipe_mutex cmask_mutex;
+ mtx_t cmask_mutex;
};
/* Auxiliary context. Mainly used to initialize resources.
* It must be locked prior to using and flushed before unlocking. */
struct pipe_context *aux_context;
- pipe_mutex aux_context_lock;
+ mtx_t aux_context_lock;
/* This must be in the screen, because UE4 uses one context for
* compilation and another one for rendering.
unsigned num_shader_cache_hits;
/* GPU load thread. */
- pipe_mutex gpu_load_mutex;
+ mtx_t gpu_load_mutex;
pipe_thread gpu_load_thread;
union r600_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
bool use_monolithic_shaders;
bool record_llvm_ir;
- pipe_mutex shader_parts_mutex;
+ mtx_t shader_parts_mutex;
struct si_shader_part *vs_prologs;
struct si_shader_part *vs_epilogs;
struct si_shader_part *tcs_epilogs;
* - GS and CS aren't cached, but it's certainly possible to cache
* those as well.
*/
- pipe_mutex shader_cache_mutex;
+ mtx_t shader_cache_mutex;
struct hash_table *shader_cache;
/* Shader compiler queue for multithreaded compilation. */
struct util_queue_fence ready;
struct si_compiler_ctx_state compiler_ctx_state;
- pipe_mutex mutex;
+ mtx_t mutex;
struct si_shader *first_variant; /* immutable after the first variant */
struct si_shader *last_variant; /* mutable */
struct rbug_list list;
/* call locking */
- pipe_mutex call_mutex;
+ mtx_t call_mutex;
/* current state */
struct {
} curr;
/* draw locking */
- pipe_mutex draw_mutex;
+ mtx_t draw_mutex;
pipe_condvar draw_cond;
unsigned draw_num_rules;
int draw_blocker;
} draw_rule;
/* list of state objects */
- pipe_mutex list_mutex;
+ mtx_t list_mutex;
unsigned num_shaders;
struct rbug_list shaders;
};
/* remote debugger */
struct rbug_rbug *rbug;
- pipe_mutex list_mutex;
+ mtx_t list_mutex;
int num_contexts;
int num_resources;
int num_surfaces;
} debug;
unsigned texture_timestamp;
- pipe_mutex tex_mutex;
+ mtx_t tex_mutex;
- pipe_mutex swc_mutex; /* Used for buffer uploads */
+ mtx_t swc_mutex; /* Used for buffer uploads */
/* which formats to translate depth formats into */
struct {
*/
struct svga_host_surface_cache
{
- pipe_mutex mutex;
+ mtx_t mutex;
/* Unused buffers are put in buckets to speed up lookups */
struct list_head bucket[SVGA_HOST_SURFACE_CACHE_BUCKETS];
struct list_head *size_list;
uint32_t size_list_size;
- pipe_mutex lock;
+ mtx_t lock;
uint32_t bo_size;
uint32_t bo_count;
} bo_cache;
struct util_hash_table *bo_handles;
- pipe_mutex bo_handles_mutex;
+ mtx_t bo_handles_mutex;
uint32_t bo_size;
uint32_t bo_count;
__DRIimage * (*lookup_egl_image)(struct dri_screen *ctx, void *handle);
/* OpenCL interop */
- pipe_mutex opencl_func_mutex;
+ mtx_t opencl_func_mutex;
opencl_dri_event_add_ref_t opencl_dri_event_add_ref;
opencl_dri_event_release_t opencl_dri_event_release;
opencl_dri_event_wait_t opencl_dri_event_wait;
struct xmesa_display {
- pipe_mutex mutex;
+ mtx_t mutex;
Display *display;
struct pipe_screen *screen;
Bitmap* bitmap;
color_space colorSpace;
- pipe_mutex fbMutex;
+ mtx_t fbMutex;
struct hgl_buffer* draw;
struct hgl_buffer* read;
BOOL worker_wait;
pipe_condvar event_pop;
pipe_condvar event_push;
- pipe_mutex mutex_pop;
- pipe_mutex mutex_push;
+ mtx_t mutex_pop;
+ mtx_t mutex_push;
};
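
/*
 * The paired "push"/"pop" events above have the shape of a bounded
 * producer/consumer queue.  Below is a deliberately simplified,
 * single-lock sketch of that pattern; the actual code splits the
 * producer and consumer locks, and every name here is illustrative.
 */

#include <threads.h>

#define SKETCH_RING_SIZE 64

struct sketch_ring {
   void    *slot[SKETCH_RING_SIZE];
   unsigned head, tail, count;
   mtx_t    mutex;
   cnd_t    not_empty;   /* signalled after a push */
   cnd_t    not_full;    /* signalled after a pop  */
};

static void
sketch_ring_push(struct sketch_ring *ring, void *item)
{
   mtx_lock(&ring->mutex);
   while (ring->count == SKETCH_RING_SIZE)
      cnd_wait(&ring->not_full, &ring->mutex);
   ring->slot[ring->head] = item;
   ring->head = (ring->head + 1) % SKETCH_RING_SIZE;
   ring->count++;
   cnd_signal(&ring->not_empty);
   mtx_unlock(&ring->mutex);
}

static void *
sketch_ring_pop(struct sketch_ring *ring)
{
   void *item;

   mtx_lock(&ring->mutex);
   while (ring->count == 0)
      cnd_wait(&ring->not_empty, &ring->mutex);
   item = ring->slot[ring->tail];
   ring->tail = (ring->tail + 1) % SKETCH_RING_SIZE;
   ring->count--;
   cnd_signal(&ring->not_full);
   mtx_unlock(&ring->mutex);
   return item;
}
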
/* Consumer functions: */
struct nine_queue_pool* pool;
BOOL terminate;
pipe_condvar event_processed;
- pipe_mutex mutex_processed;
+ mtx_t mutex_processed;
struct NineDevice9 *device;
BOOL processed;
BOOL toPause;
BOOL hasPaused;
- pipe_mutex thread_running;
- pipe_mutex thread_resume;
+ mtx_t thread_running;
+ mtx_t thread_resume;
};
/* Wait for instruction to be processed.
struct vl_compositor compositor;
struct vl_compositor_state cstate;
vl_csc_matrix csc;
- pipe_mutex mutex;
+ mtx_t mutex;
} vlVaDriver;
typedef struct {
struct pipe_context *context;
struct vl_compositor compositor;
struct pipe_sampler_view *dummy_sv;
- pipe_mutex mutex;
+ mtx_t mutex;
} vlVdpDevice;
typedef struct
typedef struct
{
vlVdpDevice *device;
- pipe_mutex mutex;
+ mtx_t mutex;
struct pipe_video_codec *decoder;
} vlVdpDecoder;
// Context Management
struct hgl_context* fContext[CONTEXT_MAX];
context_id fCurrentContext;
- pipe_mutex fMutex;
+ mtx_t fMutex;
};
amdgpu_device_handle dev;
- pipe_mutex bo_fence_lock;
+ mtx_t bo_fence_lock;
int num_cs; /* The number of command streams created. */
unsigned num_total_rejected_cs;
bool check_vm;
/* List of all allocated buffers */
- pipe_mutex global_bo_list_lock;
+ mtx_t global_bo_list_lock;
struct list_head global_bo_list;
unsigned num_buffers;
};
struct pb_cache_entry cache_entry;
void *ptr;
- pipe_mutex map_mutex;
+ mtx_t map_mutex;
unsigned map_count;
bool use_reusable_pool;
} real;
* with multiple contexts (here command streams) backed by one winsys. */
static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
- pipe_mutex *mutex,
+ mtx_t *mutex,
unsigned request, const char *request_name,
bool enable)
{
struct util_hash_table *bo_handles;
/* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
struct util_hash_table *bo_vas;
- pipe_mutex bo_handles_mutex;
- pipe_mutex bo_va_mutex;
- pipe_mutex bo_fence_lock;
+ mtx_t bo_handles_mutex;
+ mtx_t bo_va_mutex;
+ mtx_t bo_fence_lock;
uint64_t va_offset;
struct list_head va_holes;
uint32_t num_cpus; /* Number of CPUs. */
struct radeon_drm_cs *hyperz_owner;
- pipe_mutex hyperz_owner_mutex;
+ mtx_t hyperz_owner_mutex;
struct radeon_drm_cs *cmask_owner;
- pipe_mutex cmask_owner_mutex;
+ mtx_t cmask_owner_mutex;
/* multithreaded command submission */
struct util_queue cs_queue;
/**
* Following members are mutable and protected by this mutex.
*/
- pipe_mutex mutex;
+ mtx_t mutex;
/**
* Fenced buffer list.
struct pb_fence_ops base;
struct vmw_winsys_screen *vws;
- pipe_mutex mutex;
+ mtx_t mutex;
/*
* Protected by mutex;
unsigned next_present_no;
uint32_t present_fences[VMW_MAX_PRESENTS];
- pipe_mutex mutex;
+ mtx_t mutex;
struct svga_winsys_buffer *buf; /* Current backing guest buffer */
uint32_t mapcount; /* Number of mappers */
uint32_t map_mode; /* PIPE_TRANSFER_[READ|WRITE] */
struct list_head delayed;
int num_delayed;
unsigned usecs;
- pipe_mutex mutex;
+ mtx_t mutex;
struct util_hash_table *bo_handles;
struct util_hash_table *bo_names;
- pipe_mutex bo_handles_mutex;
+ mtx_t bo_handles_mutex;
};
struct virgl_drm_cmd_buf {
struct list_head delayed;
int num_delayed;
unsigned usecs;
- pipe_mutex mutex;
+ mtx_t mutex;
};
struct virgl_hw_res {