#include <inttypes.h>
#include <stdio.h>
+#include <stdbool.h> /* bool/true/false replace gallium's boolean/TRUE/FALSE */
-boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
- struct pb_buffer *buf,
- enum radeon_bo_usage usage)
+bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
+ struct pb_buffer *buf,
+ enum radeon_bo_usage usage)
{
if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
- return TRUE;
+ return true;
}
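+	/* The DMA ring may be NULL or empty; radeon_emitted() guards the query. */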
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
return NULL;
}
-static boolean r600_init_block_names(struct r600_common_screen *screen,
- struct r600_perfcounter_block *block)
+static bool r600_init_block_names(struct r600_common_screen *screen,
+ struct r600_perfcounter_block *block)
{
unsigned i, j, k;
unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
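+	/* All group names live in one flat buffer, one fixed-stride slot per group. */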
block->group_names = MALLOC(block->num_groups * block->group_name_stride);
if (!block->group_names)
- return FALSE;
+ return false;
groupname = block->group_names;
for (i = 0; i < groups_shader; ++i) {
block->selector_names = MALLOC(block->num_groups * block->num_selectors *
block->selector_name_stride);
if (!block->selector_names)
- return FALSE;
+ return false;
groupname = block->group_names;
p = block->selector_names;
groupname += block->group_name_stride;
}
- return TRUE;
+ return true;
}
int r600_get_perfcounter_info(struct r600_common_screen *screen,
rscreen->perfcounters->cleanup(rscreen);
}
-boolean r600_perfcounters_init(struct r600_perfcounters *pc,
- unsigned num_blocks)
+bool r600_perfcounters_init(struct r600_perfcounters *pc,
+ unsigned num_blocks)
{
pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block));
if (!pc->blocks)
- return FALSE;
+ return false;
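+	/* Debug toggles: expose per-shader-engine and per-instance counter groups separately. */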
- pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", FALSE);
- pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", FALSE);
+ pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
+ pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);
- return TRUE;
+ return true;
}
void r600_perfcounters_add_block(struct r600_common_screen *rscreen,
rctx->allocator_zeroed_memory =
u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
- 0, PIPE_USAGE_DEFAULT, TRUE);
+ 0, PIPE_USAGE_DEFAULT, true);
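+	/* The final 'true' asks the suballocator for zero-initialized buffers, matching this pool's purpose. */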
if (!rctx->allocator_zeroed_memory)
return false;
unsigned dirty_level_mask; /* each bit says if that mipmap is compressed */
unsigned stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
struct r600_texture *flushed_depth_texture;
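+	/* Set when this texture is a temporary created to hold flushed depth data. */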
- boolean is_flushing_texture;
+ bool is_flushing_texture;
struct radeon_surf surface;
/* Colorbuffer compression and fast clear. */
struct r600_atom render_cond_atom;
struct pipe_query *render_cond;
unsigned render_cond_mode;
- boolean render_cond_invert;
+ bool render_cond_invert;
bool render_cond_force_off; /* for u_blitter */
/* MSAA sample locations.
};
/* r600_buffer.c */
-boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
- struct pb_buffer *buf,
- enum radeon_bo_usage usage);
+bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
+ struct pb_buffer *buf,
+ enum radeon_bo_usage usage);
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
struct r600_resource *resource,
unsigned usage);
}
}
-static boolean r600_query_sw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+static bool r600_query_sw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
unreachable("r600_query_sw_begin: bad query type");
}
- return TRUE;
+ return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
return true;
}
-static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait,
- union pipe_query_result *result)
+static bool r600_query_sw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ union pipe_query_result *result)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
/* Convert from cycles per millisecond to cycles per second (Hz). */
result->timestamp_disjoint.frequency =
(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
- result->timestamp_disjoint.disjoint = FALSE;
- return TRUE;
+ result->timestamp_disjoint.disjoint = false;
+ return true;
case PIPE_QUERY_GPU_FINISHED: {
struct pipe_screen *screen = rctx->b.screen;
result->b = screen->fence_finish(screen, query->fence,
case R600_QUERY_GPIN_ASIC_ID:
result->u32 = 0;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SIMD:
result->u32 = rctx->screen->info.num_good_compute_units;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_RB:
result->u32 = rctx->screen->info.num_render_backends;
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SPI:
result->u32 = 1; /* all supported chips have one SPI per SE */
- return TRUE;
+ return true;
case R600_QUERY_GPIN_NUM_SE:
result->u32 = rctx->screen->info.max_se;
- return TRUE;
+ return true;
}
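+	/* Most counters report the delta between their begin and end snapshots. */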
result->u64 = query->end_result - query->begin_result;
break;
}
- return TRUE;
+ return true;
}
static struct r600_query_ops sw_query_ops = {
.add_result = r600_query_hw_add_result,
};
-boolean r600_query_hw_init(struct r600_common_context *rctx,
- struct r600_query_hw *query)
+bool r600_query_hw_init(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
query->buffer.buf = r600_new_query_buffer(rctx, query);
if (!query->buffer.buf)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
r600_update_prims_generated_query_state(ctx, query->b.type, 1);
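+	/* The trailing bool is count_draw_in: also reserve space for the worst-case draw call. */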
ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
- TRUE);
+ true);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
/* The queries which need begin already called this in begin_query. */
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
}
/* emit end query */
}
}
-boolean r600_query_hw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+bool r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
util_query_clear_result(result, query->b.type);
}
-boolean r600_query_hw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait, union pipe_query_result *result)
+bool r600_query_hw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait, union pipe_query_result *result)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
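+	/* Without 'wait', map with DONTBLOCK and fail fast if results aren't ready. */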
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
if (!map)
- return FALSE;
+ return false;
while (results_base != qbuf->results_end) {
query->ops->add_result(rctx, query, map + results_base,
rquery->type == PIPE_QUERY_TIMESTAMP) {
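+		/* clock_crystal_freq is in kHz: ticks * 1000000 / freq gives nanoseconds. */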
result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
}
- return TRUE;
+ return true;
}
static void r600_render_condition(struct pipe_context *ctx,
assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
- ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
+ ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_start(ctx, query);
struct r600_query_ops {
void (*destroy)(struct r600_common_context *, struct r600_query *);
- boolean (*begin)(struct r600_common_context *, struct r600_query *);
+ bool (*begin)(struct r600_common_context *, struct r600_query *);
bool (*end)(struct r600_common_context *, struct r600_query *);
- boolean (*get_result)(struct r600_common_context *,
- struct r600_query *, boolean wait,
- union pipe_query_result *result);
+ bool (*get_result)(struct r600_common_context *,
+ struct r600_query *, bool wait,
+ union pipe_query_result *result);
};
struct r600_query {
unsigned stream;
};
-boolean r600_query_hw_init(struct r600_common_context *rctx,
- struct r600_query_hw *query);
+bool r600_query_hw_init(struct r600_common_context *rctx,
+ struct r600_query_hw *query);
void r600_query_hw_destroy(struct r600_common_context *rctx,
struct r600_query *rquery);
-boolean r600_query_hw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery);
+bool r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery);
bool r600_query_hw_end(struct r600_common_context *rctx,
struct r600_query *rquery);
-boolean r600_query_hw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait,
- union pipe_query_result *result);
+bool r600_query_hw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ union pipe_query_result *result);
/* Performance counters */
enum {
void (*cleanup)(struct r600_common_screen *);
- boolean separate_se;
- boolean separate_instance;
+ bool separate_se;
+ bool separate_instance;
};
struct pipe_query *r600_create_batch_query(struct pipe_context *ctx,
unsigned index,
struct pipe_driver_query_group_info *info);
-boolean r600_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks);
+bool r600_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks);
void r600_perfcounters_add_block(struct r600_common_screen *,
struct r600_perfcounters *,
const char *name, unsigned flags,
return false;
}
- (*flushed_depth_texture)->is_flushing_texture = TRUE;
+ (*flushed_depth_texture)->is_flushing_texture = true;
(*flushed_depth_texture)->non_disp_tiling = false;
return true;
}
surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
extra_channel = -1;
} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
- if(r600_translate_colorswap(surface_format, FALSE) <= 1)
+ if (r600_translate_colorswap(surface_format, false) <= 1)
extra_channel = desc->nr_channels - 1;
else
extra_channel = 0;
LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
LLVMValueRef temp_ptr, temp_ptr2 = NULL;
unsigned chan, chan_index;
- boolean is_vec_store = FALSE;
+ bool is_vec_store = false;
enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
if (dst[0]) {
struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;
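+	/* Describe a 32-bit scalar IEEE float for gallivm's type system. */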
- type.floating = TRUE;
- type.fixed = FALSE;
- type.sign = TRUE;
- type.norm = FALSE;
+ type.floating = true;
+ type.fixed = false;
+ type.sign = true;
+ type.norm = false;
type.width = 32;
type.length = 1;
return NULL;
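+	/* DRM major < 3 means the old radeon kernel driver; fall back to UVD's legacy message format. */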
if (info.drm_major < 3)
- dec->use_legacy = TRUE;
+ dec->use_legacy = true;
dec->base = *templ;
dec->base.context = context;