unsigned r600_get_clock_crystal_freq(struct radeon *radeon);
unsigned r600_get_minor_version(struct radeon *radeon);
unsigned r600_get_num_backends(struct radeon *radeon);
+unsigned r600_get_num_tile_pipes(struct radeon *radeon);
+unsigned r600_get_backend_map(struct radeon *radeon);
/* r600_bo.c */
struct r600_bo;

struct r600_context {
	/* ... */
	u32 *pm4;
struct list_head query_list;
unsigned num_query_running;
+	unsigned backend_mask; /* bitmask of enabled render backends */
struct list_head fenced_bo;
unsigned max_db; /* for OQ */
unsigned num_dest_buffers;
struct r600_bo *indices;
};
+void r600_get_backend_mask(struct r600_context *ctx);
int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif
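+/* older radeon_drm.h headers may not define these info requests yet */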
+#ifndef RADEON_INFO_NUM_TILE_PIPES
+#define RADEON_INFO_NUM_TILE_PIPES 0xb
+#endif
+
+#ifndef RADEON_INFO_BACKEND_MAP
+#define RADEON_INFO_BACKEND_MAP 0xd
+#endif
+
enum radeon_family r600_get_family(struct radeon *r600)
{
	return r600->family;
}

unsigned r600_get_num_backends(struct radeon *radeon)
{
	return radeon->num_backends;
}
+unsigned r600_get_num_tile_pipes(struct radeon *radeon)
+{
+ return radeon->num_tile_pipes;
+}
+
+unsigned r600_get_backend_map(struct radeon *radeon)
+{
+ return radeon->backend_map;
+}
+
unsigned r600_get_minor_version(struct radeon *radeon)
{
	return radeon->minor_version;
}

static int radeon_get_num_backends(struct radeon *radeon)
{
	/* ... */
	return 0;
}
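+/* query the number of tile pipes from the kernel (RADEON_INFO_NUM_TILE_PIPES) */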
+static int radeon_get_num_tile_pipes(struct radeon *radeon)
+{
+ struct drm_radeon_info info = {};
+ uint32_t num_tile_pipes = 0;
+ int r;
+
+ info.request = RADEON_INFO_NUM_TILE_PIPES;
+ info.value = (uintptr_t)&num_tile_pipes;
+ r = drmCommandWriteRead(radeon->fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ if (r)
+ return r;
+
+ radeon->num_tile_pipes = num_tile_pipes;
+ return 0;
+}
+
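+/* query the backend map (one render backend index per tile pipe) from the
+ * kernel and remember that it is valid, so r600_get_backend_mask() can decode
+ * it instead of probing with a ZPASS_DONE event */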
+static int radeon_get_backend_map(struct radeon *radeon)
+{
+ struct drm_radeon_info info = {};
+ uint32_t backend_map = 0;
+ int r;
+
+ info.request = RADEON_INFO_BACKEND_MAP;
+ info.value = (uintptr_t)&backend_map;
+ r = drmCommandWriteRead(radeon->fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ if (r)
+ return r;
+
+ radeon->backend_map = backend_map;
+ radeon->backend_map_valid = TRUE;
+
+ return 0;
+}
+
static int radeon_init_fence(struct radeon *radeon)
{
	/* ... */
}

	if (radeon->minor_version >= 9)
		radeon_get_num_backends(radeon);
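+	/* the tile pipe and backend map queries are only available on newer kernels */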
+ if (radeon->minor_version >= 11) {
+ radeon_get_num_tile_pipes(radeon);
+ radeon_get_backend_map(radeon);
+ }
+
radeon->bomgr = r600_bomgr_create(radeon, 1000000);
if (radeon->bomgr == NULL) {
return NULL;
#define GROUP_FORCE_NEW_BLOCK 0
+/* Get the mask of enabled render backends */
+void r600_get_backend_mask(struct r600_context *ctx)
+{
+	struct r600_bo *buffer;
+	u32 *results;
+ unsigned num_backends = r600_get_num_backends(ctx->radeon);
+ unsigned i, mask = 0;
+
+ /* if backend_map query is supported by the kernel */
+ if (ctx->radeon->backend_map_valid) {
+ unsigned num_tile_pipes = r600_get_num_tile_pipes(ctx->radeon);
+ unsigned backend_map = r600_get_backend_map(ctx->radeon);
+ unsigned item_width, item_mask;
+
+ if (ctx->radeon->chip_class >= EVERGREEN) {
+ item_width = 4;
+ item_mask = 0x7;
+ } else {
+ item_width = 2;
+ item_mask = 0x3;
+ }
+
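+		/* backend_map packs one backend index per tile pipe: 4-bit slots on
+		 * evergreen, 2-bit slots on r6xx/r7xx */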
+		while (num_tile_pipes--) {
+ i = backend_map & item_mask;
+ mask |= (1<<i);
+ backend_map >>= item_width;
+ }
+ if (mask != 0) {
+ ctx->backend_mask = mask;
+ return;
+ }
+ }
+
+ /* otherwise backup path for older kernels */
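+	/* write a ZPASS_DONE event into a zeroed buffer: every enabled backend
+	 * fills its 16-byte slot (top bit of the high dword set), so slots that
+	 * stay zero belong to disabled backends */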
+
+ /* create buffer for event data */
+ buffer = r600_bo(ctx->radeon, ctx->max_db*16, 1, 0,
+ PIPE_USAGE_STAGING);
+ if (!buffer)
+ goto err;
+
+ /* initialize buffer with zeroes */
+ results = r600_bo_map(ctx->radeon, buffer, PB_USAGE_CPU_WRITE, NULL);
+ if (results) {
+ memset(results, 0, ctx->max_db * 4 * 4);
+ r600_bo_unmap(ctx->radeon, buffer);
+
+ /* emit EVENT_WRITE for ZPASS_DONE */
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
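+		/* the two zero dwords are the destination offset within the buffer;
+		 * the NOP packet below carries the relocation for it */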
+
+ ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
+ ctx->pm4[ctx->pm4_cdwords++] = 0;
+ r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], buffer);
+
+ /* execute */
+ r600_context_flush(ctx);
+
+ /* analyze results */
+ results = r600_bo_map(ctx->radeon, buffer, PB_USAGE_CPU_READ, NULL);
+ if (results) {
+			for (i = 0; i < ctx->max_db; i++) {
+ /* at least highest bit will be set if backend is used */
+ if (results[i*4 + 1])
+ mask |= (1<<i);
+ }
+ r600_bo_unmap(ctx->radeon, buffer);
+ }
+ }
+
+ r600_bo_reference(ctx->radeon, &buffer, NULL);
+
+ if (mask != 0) {
+ ctx->backend_mask = mask;
+ return;
+ }
+
+err:
+ /* fallback to old method - set num_backends lower bits to 1 */
+ ctx->backend_mask = (~((u32)0))>>(32-num_backends);
+ return;
+}
+
static inline void r600_context_ps_partial_flush(struct r600_context *ctx)
{
	if (!(ctx->flags & R600_CONTEXT_DRAW_PENDING))
		return;
	/* ... */
}

int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
{
	/* ... */
	ctx->max_db = 4;
+ r600_get_backend_mask(ctx);
+
return 0;
out_err:
r600_context_fini(ctx);
void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
{
unsigned required_space, new_results_end;
- int num_backends = r600_get_num_backends(ctx->radeon);
/* query request needs 6/8 dwords for begin + 6/8 dwords for end */
if (query->type == PIPE_QUERY_TIME_ELAPSED)
memset(results, 0, query->result_size);
/* Set top bits for unused backends */
- for (i = num_backends; i < ctx->max_db; i++) {
- results[(i * 4)+1] = 0x80000000;
- results[(i * 4)+3] = 0x80000000;
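+	/* mark the begin/end counters of disabled backends as written, so the
+	 * results-ready check does not wait for backends that never report */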
+ for (i = 0; i < ctx->max_db; i++) {
+ if (!(ctx->backend_mask & (1<<i))) {
+ results[(i * 4)+1] = 0x80000000;
+ results[(i * 4)+3] = 0x80000000;
+ }
}
r600_bo_unmap(ctx->radeon, query->buffer);
}