uint64_t gart_size;
uint64_t vram_size;
bool has_dedicated_vram;
- boolean has_virtual_memory;
+ bool has_virtual_memory;
bool gfx_ib_pad_with_type2;
- boolean has_sdma;
- boolean has_uvd;
+ bool has_sdma;
+ bool has_uvd;
uint32_t vce_fw_version;
uint32_t vce_harvest_config;
uint32_t clock_crystal_freq;
uint32_t drm_major; /* version */
uint32_t drm_minor;
uint32_t drm_patchlevel;
- boolean has_userptr;
+ bool has_userptr;
/* Shader cores. */
uint32_t r600_max_quad_pipes; /* wave size / 16 */
uint32_t r300_num_gb_pipes;
uint32_t r300_num_z_pipes;
uint32_t r600_gb_backend_map; /* R600 harvest config */
- boolean r600_gb_backend_map_valid;
+ bool r600_gb_backend_map_valid;
uint32_t r600_num_banks;
uint32_t num_render_backends;
uint32_t num_tile_pipes; /* pipe count from PIPE_CONFIG */
* \param buf A winsys buffer object to get the handle from.
* \param whandle A winsys handle pointer.
* \param stride A stride of the buffer in bytes, for texturing.
- * \return TRUE on success.
+ * \return true on success.
*/
- boolean (*buffer_get_handle)(struct pb_buffer *buf,
- unsigned stride, unsigned offset,
- unsigned slice_size,
- struct winsys_handle *whandle);
+ bool (*buffer_get_handle)(struct pb_buffer *buf,
+ unsigned stride, unsigned offset,
+ unsigned slice_size,
+ struct winsys_handle *whandle);
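As a usage sketch (hypothetical caller; ws, tex, and the stride/size values are illustrative, not part of this patch), exporting a buffer for sharing looks roughly like:

/* Hypothetical caller: export a texture's buffer for scan-out/sharing.
 * The handle type is chosen by the state tracker before the call. */
struct winsys_handle whandle = {0};
whandle.type = DRM_API_HANDLE_TYPE_KMS;
if (!ws->buffer_get_handle(tex->buf, tex->stride, 0,
                           tex->slice_size, &whandle))
    return false;   /* export failed */
/* whandle.handle and whandle.stride are now valid for the consumer. */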
/**
* Return the virtual address of a buffer.
struct pb_buffer *buf);
/**
- * Return TRUE if there is enough memory in VRAM and GTT for the buffers
+ * Return true if there is enough memory in VRAM and GTT for the buffers
* added so far. If the validation fails, all buffers which have
* been added since the last call of cs_validate will be removed and
* the CS will be flushed (provided there are still any buffers).
*
* \param cs A command stream to validate.
*/
- boolean (*cs_validate)(struct radeon_winsys_cs *cs);
+ bool (*cs_validate)(struct radeon_winsys_cs *cs);
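The intended add-then-validate pattern, as a hedged sketch (cs_add_buffer and the enum values come from the surrounding interface, not this hunk):

/* Sketch: add the buffers a draw needs, then validate before emitting
 * packets that reference them. */
ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE, domain, priority);
if (!ws->cs_validate(cs)) {
   /* Everything added since the last successful cs_validate has been
    * dropped and the CS flushed; re-add this draw's buffers. */
}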
/**
* Check whether the given number of dwords is available in the IB.
bool (*cs_check_space)(struct radeon_winsys_cs *cs, unsigned dw);
/**
- * Return TRUE if there is enough memory in VRAM and GTT for the buffers
+ * Return true if there is enough memory in VRAM and GTT for the buffers
* added so far.
*
* \param cs A command stream to validate.
- * \param vram VRAM memory size pending to be use
- * \param gtt GTT memory size pending to be use
+ * \param vram VRAM memory size pending to be used
+ * \param gtt GTT memory size pending to be used
*/
- boolean (*cs_memory_below_limit)(struct radeon_winsys_cs *cs, uint64_t vram, uint64_t gtt);
+ bool (*cs_memory_below_limit)(struct radeon_winsys_cs *cs,
+ uint64_t vram, uint64_t gtt);
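A driver can use this to flush early rather than letting a later validation fail; a minimal sketch, assuming the usual per-resource usage counters and the async flush flag:

/* Sketch: flush asynchronously if the pending copy would not fit. */
if (!ws->cs_memory_below_limit(cs, src->vram_usage + dst->vram_usage,
                               src->gart_usage + dst->gart_usage))
   ws->cs_flush(cs, RADEON_FLUSH_ASYNC, NULL);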
uint64_t (*cs_query_memory_usage)(struct radeon_winsys_cs *cs);
struct pipe_fence_handle **fence);
/**
- * Return TRUE if a buffer is referenced by a command stream.
+ * Return true if a buffer is referenced by a command stream.
*
* \param cs A command stream.
* \param buf A winsys buffer.
*/
- boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
- struct pb_buffer *buf,
- enum radeon_bo_usage usage);
+ bool (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
+ struct pb_buffer *buf,
+ enum radeon_bo_usage usage);
/**
* Request access to a feature for a command stream.
* \param fid Feature ID, one of RADEON_FID_*
* \param enable Whether to enable or disable the feature.
*/
- boolean (*cs_request_feature)(struct radeon_winsys_cs *cs,
- enum radeon_feature_id fid,
- boolean enable);
+ bool (*cs_request_feature)(struct radeon_winsys_cs *cs,
+ enum radeon_feature_id fid,
+ bool enable);
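A typical call site, sketched with one of the RADEON_FID_* IDs referenced above:

/* Sketch: request exclusive Hyper-Z access before enabling it. */
if (!ws->cs_request_feature(cs, RADEON_FID_R300_HYPERZ_ACCESS, true)) {
   /* Another context owns the feature; take the non-Hyper-Z path. */
}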
/**
- * Make sure all asynchronous flush of the cs have completed
+ * Make sure all asynchronous flushes of the CS have completed
*
return NULL;
}
-static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
- unsigned stride, unsigned offset,
- unsigned slice_size,
- struct winsys_handle *whandle)
+static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
+ unsigned stride, unsigned offset,
+ unsigned slice_size,
+ struct winsys_handle *whandle)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
enum amdgpu_bo_handle_type type;
type = amdgpu_bo_handle_type_kms;
break;
default:
- return FALSE;
+ return false;
}
r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
if (r)
- return FALSE;
+ return false;
whandle->stride = stride;
whandle->offset = offset;
whandle->offset += slice_size * whandle->layer;
bo->is_shared = true;
- return TRUE;
+ return true;
}
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
&expired);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
- return FALSE;
+ return false;
}
if (expired) {
ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}
-static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
- enum ring_type ring_type)
+static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
+ enum ring_type ring_type)
{
int i;
cs->buffers = (struct amdgpu_cs_buffer*)
CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
if (!cs->buffers) {
- return FALSE;
+ return false;
}
cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
if (!cs->handles) {
FREE(cs->buffers);
- return FALSE;
+ return false;
}
cs->flags = CALLOC(1, cs->max_num_buffers);
if (!cs->flags) {
FREE(cs->handles);
FREE(cs->buffers);
- return FALSE;
+ return false;
}
for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
AMDGPU_IB_FLAG_PREAMBLE;
- return TRUE;
+ return true;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
}
-static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
+static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
- return TRUE;
+ return true;
}
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
return true;
}
-static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
+static bool amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
+ uint64_t vram, uint64_t gtt)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
return cs->num_buffers;
}
-DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
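The macro caches the environment lookup in a function-local static; roughly (a simplified sketch of the u_debug.h expansion, not verbatim), it generates:

/* Approximate expansion: a lazily-initialized, cached env-var lookup. */
static bool debug_get_option_all_bos(void)
{
   static bool initialized = false;
   static bool value;
   if (!initialized) {
      initialized = true;
      value = debug_get_bool_option("RADEON_ALL_BOS", false);
   }
   return value;
}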
/* Since the kernel driver doesn't synchronize execution between different
* rings automatically, we have to add fence dependencies manually.
util_queue_job_wait(&cs->flush_completed);
}
-DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
FREE(cs);
}
-static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct pb_buffer *_buf,
- enum radeon_bo_usage usage)
+static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
+ struct pb_buffer *_buf,
+ enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
}
}
-static inline boolean
+static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo)
{
(num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}
-static inline boolean
+static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo,
enum radeon_bo_usage usage)
int index;
if (!bo->num_cs_references)
- return FALSE;
+ return false;
index = amdgpu_lookup_buffer(cs->csc, bo);
if (index == -1)
- return FALSE;
+ return false;
return (cs->csc->buffers[index].usage & usage) != 0;
}
-static inline boolean
+static inline bool
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
return bo->num_cs_references != 0;
}
/* Helper function to do the ioctls needed for setup and init. */
-static boolean do_winsys_init(struct amdgpu_winsys *ws, int fd)
+static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
struct amdgpu_buffer_size_alignments alignment_info = {};
struct amdgpu_heap_info vram, gtt;
ws->info.has_uvd = uvd.available_rings != 0;
ws->info.vce_fw_version =
vce.available_rings ? vce_version : 0;
- ws->info.has_userptr = TRUE;
+ ws->info.has_userptr = true;
ws->info.num_render_backends = ws->amdinfo.rb_pipes;
ws->info.clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
ws->info.num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
ws->info.pipe_interleave_bytes = 256 << ((ws->amdinfo.gb_addr_cfg >> 4) & 0x7);
- ws->info.has_virtual_memory = TRUE;
+ ws->info.has_virtual_memory = true;
ws->info.has_sdma = dma.available_rings != 0;
/* Get the number of good compute units. */
ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
- return TRUE;
+ return true;
fail:
if (ws->addrlib)
AddrDestroy(ws->addrlib);
amdgpu_device_deinitialize(ws->dev);
ws->dev = NULL;
- return FALSE;
+ return false;
}
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
*info = ((struct amdgpu_winsys *)rws)->info;
}
-static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
- enum radeon_feature_id fid,
- boolean enable)
+static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
+ enum radeon_feature_id fid,
+ bool enable)
{
- return FALSE;
+ return false;
}
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
return key1 != key2;
}
-DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
+DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", true)
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
return NULL;
}
-static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
- unsigned stride, unsigned offset,
- unsigned slice_size,
- struct winsys_handle *whandle)
+static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
+ unsigned stride, unsigned offset,
+ unsigned slice_size,
+ struct winsys_handle *whandle)
{
struct drm_gem_flink flink;
struct radeon_bo *bo = radeon_bo(buffer);
flink.handle = bo->handle;
if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
- return FALSE;
+ return false;
}
bo->flink_name = flink.name;
whandle->handle = bo->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
- return FALSE;
+ return false;
}
whandle->stride = stride;
whandle->offset = offset;
whandle->offset += slice_size * whandle->layer;
- return TRUE;
+ return true;
}
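On the consumer side, the FD path is typically reversed with libdrm's prime import (a sketch; consumer_fd is a hypothetical second DRM file descriptor):

/* Hypothetical consumer: turn the exported FD back into a GEM handle. */
uint32_t gem_handle;
if (drmPrimeFDToHandle(consumer_fd, (int)whandle.handle, &gem_handle) != 0)
   return false;   /* import failed */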
static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
/* No context support here. */
}
-static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
- struct radeon_drm_winsys *ws)
+static bool radeon_init_cs_context(struct radeon_cs_context *csc,
+ struct radeon_drm_winsys *ws)
{
int i;
csc->relocs_bo = (struct radeon_bo_item*)
CALLOC(1, csc->nrelocs * sizeof(csc->relocs_bo[0]));
if (!csc->relocs_bo) {
- return FALSE;
+ return false;
}
csc->relocs = (struct drm_radeon_cs_reloc*)
CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
if (!csc->relocs) {
FREE(csc->relocs_bo);
- return FALSE;
+ return false;
}
csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
csc->reloc_indices_hashlist[i] = -1;
}
- return TRUE;
+ return true;
}
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
}
-static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
+static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
- boolean status =
+ bool status =
cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
cs->csc->used_vram < cs->ws->info.vram_size * 0.8;
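With the 0.8 headroom factor above, a card with 4 GiB of VRAM and 2 GiB of GART keeps validating only while the CS references less than about 3.2 GiB of VRAM and 1.6 GiB of GART.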
return rcs->current.max_dw - rcs->current.cdw >= dw;
}
-static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
+static bool radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
+                                             uint64_t vram, uint64_t gtt)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "radeon: Not enough memory for command submission.\n");
- else if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
+ else if (debug_get_bool_option("RADEON_DUMP_CS", false)) {
unsigned i;
fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
util_queue_job_wait(&cs->flush_completed);
}
-DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
FREE(cs);
}
-static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct pb_buffer *_buf,
- enum radeon_bo_usage usage)
+static bool radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
+ struct pb_buffer *_buf,
+ enum radeon_bo_usage usage)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)_buf;
int index;
if (!bo->num_cs_references)
- return FALSE;
+ return false;
index = radeon_lookup_buffer(cs->csc, bo);
if (index == -1)
- return FALSE;
+ return false;
if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
- return TRUE;
+ return true;
if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
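Note that usage is a bitmask, so callers can test either or both domains in one query; a brief sketch:

/* Sketch: RADEON_USAGE_READWRITE matches a BO referenced for reading
 * or writing, since each domain is tested with (usage & ...). */
bool busy = ws->cs_is_buffer_referenced(cs, buf, RADEON_USAGE_READWRITE);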
/* FENCES */
return (struct radeon_drm_cs*)base;
}
-static inline boolean
+static inline bool
radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
(num_refs && radeon_lookup_buffer(cs->csc, bo) != -1);
}
-static inline boolean
+static inline bool
radeon_bo_is_referenced_by_cs_for_write(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int index;
if (!bo->num_cs_references)
- return FALSE;
+ return false;
index = radeon_lookup_buffer(cs->csc, bo);
if (index == -1)
- return FALSE;
+ return false;
return cs->csc->relocs[index].write_domain != 0;
}
-static inline boolean
+static inline bool
radeon_bo_is_referenced_by_any_cs(struct radeon_bo *bo)
{
return bo->num_cs_references != 0;
pipe_static_mutex(fd_tab_mutex);
/* Enable/disable feature access for one command stream.
- * If enable == TRUE, return TRUE on success.
- * Otherwise, return FALSE.
+ * If enable == true, return true on success.
+ * Otherwise, return false.
*
- * We basically do the same thing kernel does, because we have to deal
+ * We basically do the same thing the kernel does, because we have to deal
* with multiple contexts (here command streams) backed by one winsys. */
-static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
- struct radeon_drm_cs **owner,
- pipe_mutex *mutex,
- unsigned request, const char *request_name,
- boolean enable)
+static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
+ struct radeon_drm_cs **owner,
+ pipe_mutex *mutex,
+ unsigned request, const char *request_name,
+ bool enable)
{
struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
- return FALSE;
+ return false;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
- return FALSE;
+ return false;
}
}
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
pipe_mutex_unlock(*mutex);
- return FALSE;
+ return false;
}
/* Update the rights in the winsys. */
if (value) {
*owner = applier;
pipe_mutex_unlock(*mutex);
- return TRUE;
+ return true;
}
} else {
*owner = NULL;
}
pipe_mutex_unlock(*mutex);
- return FALSE;
+ return false;
}
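For reference, the Hyper-Z case of radeon_cs_request_feature dispatches into this helper roughly as follows (a sketch; the owner/mutex field names mirror the CMASK call partially visible below):

case RADEON_FID_R300_HYPERZ_ACCESS:
   return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
                               &cs->ws->hyperz_owner_mutex,
                               RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
                               enable);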
-static boolean radeon_get_drm_value(int fd, unsigned request,
- const char *errname, uint32_t *out)
+static bool radeon_get_drm_value(int fd, unsigned request,
+ const char *errname, uint32_t *out)
{
struct drm_radeon_info info;
int retval;
fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
errname, retval);
}
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
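The elided top of this helper fills in the kernel query structure; roughly (a sketch of standard drm_radeon_info usage, since the hunk shows only the error path):

/* Sketch of the elided setup: the kernel writes the result through the
 * pointer stored in info.value. */
struct drm_radeon_info info = {0};
info.value = (unsigned long)out;
info.request = request;
retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));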
/* Helper function to do the ioctls needed for setup and init. */
-static boolean do_winsys_init(struct radeon_drm_winsys *ws)
+static bool do_winsys_init(struct radeon_drm_winsys *ws)
{
struct drm_radeon_gem_info gem_info;
int retval;
version->version_minor,
version->version_patchlevel);
drmFreeVersion(version);
- return FALSE;
+ return false;
}
ws->info.drm_major = version->version_major;
/* Get PCI ID. */
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
&ws->info.pci_id))
- return FALSE;
+ return false;
/* Check PCI ID. */
switch (ws->info.pci_id) {
default:
fprintf(stderr, "radeon: Invalid PCI ID.\n");
- return FALSE;
+ return false;
}
switch (ws->info.family) {
default:
case CHIP_UNKNOWN:
fprintf(stderr, "radeon: Unknown family.\n");
- return FALSE;
+ return false;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
}
/* Check for dma */
- ws->info.has_sdma = FALSE;
+ ws->info.has_sdma = false;
- /* DMA is disabled on R700. There is IB corruption and hangs. */
+ /* DMA is disabled on R700: it causes IB corruption and hangs. */
if (ws->info.chip_class >= EVERGREEN && ws->info.drm_minor >= 27) {
- ws->info.has_sdma = TRUE;
+ ws->info.has_sdma = true;
}
/* Check for UVD and VCE */
- ws->info.has_uvd = FALSE;
+ ws->info.has_uvd = false;
ws->info.vce_fw_version = 0x00000000;
if (ws->info.drm_minor >= 32) {
uint32_t value = RADEON_CS_RING_UVD;
if (retval) {
fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
retval);
- return FALSE;
+ return false;
}
ws->info.gart_size = gem_info.gart_size;
ws->info.vram_size = gem_info.vram_size;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
"GB pipe count",
&ws->info.r300_num_gb_pipes))
- return FALSE;
+ return false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
"Z pipe count",
&ws->info.r300_num_z_pipes))
- return FALSE;
+ return false;
}
else if (ws->gen >= DRV_R600) {
uint32_t tiling_config = 0;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
"num backends",
&ws->info.num_render_backends))
- return FALSE;
+ return false;
/* get the GPU counter frequency, failure is not fatal */
radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
&ws->info.r600_gb_backend_map))
- ws->info.r600_gb_backend_map_valid = TRUE;
+ ws->info.r600_gb_backend_map_valid = true;
- ws->info.has_virtual_memory = FALSE;
+ ws->info.has_virtual_memory = false;
if (ws->info.drm_minor >= 13) {
uint32_t ib_vm_max_size;
- ws->info.has_virtual_memory = TRUE;
+ ws->info.has_virtual_memory = true;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->va_start))
- ws->info.has_virtual_memory = FALSE;
+ ws->info.has_virtual_memory = false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ib_vm_max_size))
- ws->info.has_virtual_memory = FALSE;
+ ws->info.has_virtual_memory = false;
radeon_get_drm_value(ws->fd, RADEON_INFO_VA_UNMAP_WORKING, NULL,
&ws->va_unmap_working);
}
- if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
- ws->info.has_virtual_memory = FALSE;
+ if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", false))
+ ws->info.has_virtual_memory = false;
}
- /* Get max pipes, this is only needed for compute shaders. All evergreen+
+ /* Get max pipes; this is only needed for compute shaders. All evergreen+
"returned accel_working2 value %u is smaller than 2. "
"Please install a newer kernel.\n",
ws->accel_working2);
- return FALSE;
+ return false;
}
if (ws->info.chip_class == CIK) {
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, NULL,
ws->info.cik_macrotile_mode_array)) {
fprintf(stderr, "radeon: Kernel 3.13 is required for CIK support.\n");
- return FALSE;
+ return false;
}
}
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, NULL,
ws->info.si_tile_mode_array)) {
fprintf(stderr, "radeon: Kernel 3.10 is required for SI support.\n");
- return FALSE;
+ return false;
}
}
ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
- return TRUE;
+ return true;
}
static void radeon_winsys_destroy(struct radeon_winsys *rws)
*info = ((struct radeon_drm_winsys *)rws)->info;
}
-static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
- enum radeon_feature_id fid,
- boolean enable)
+static bool radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
+ enum radeon_feature_id fid,
+ bool enable)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
RADEON_INFO_WANT_CMASK, "AA optimizations",
enable);
}
- return FALSE;
+ return false;
}
static uint64_t radeon_query_value(struct radeon_winsys *rws,
stat1.st_rdev != stat2.st_rdev;
}
-DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
+DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", true)
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{