/* Get backends mask.
 *
 * Computes ctx->backend_mask, a bitmask of the render backends (DBs) that
 * are actually present/usable on this GPU. Three strategies, in order:
 *   1. decode the kernel-provided backend_map, if the kernel exports it;
 *   2. otherwise probe the hardware with a ZPASS_DONE event write and see
 *      which backends wrote results;
 *   3. as a last resort, assume the first num_backends bits.
 */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		/* backend_map packs one backend id per tile pipe; the field
		 * width differs by generation (4 bits on evergreen+, 2 bits
		 * on r600/r700 — presumably per the hw register layout). */
		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		/* set a bit for every backend referenced by a tile pipe */
		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data: 16 bytes (4 dwords) per DB */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE: every active backend
		 * writes its occlusion counter pair into the buffer */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		/* only the low 8 bits of the upper address dword are valid */
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		/* NOP + relocation so the kernel patches/pins the buffer */
		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);

		/* analyze results (mapping for READ syncs with the ring,
		 * so the event has completed by the time we look) */
		results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}
-
/* Initialize one register block.
 *
 * Fills @block with a prebuilt PM4 packet covering @nreg consecutive
 * registers starting at reg[@index]: a SET_*_REG header, the register
 * offset, one dword slot per register, and (for registers that need a
 * buffer object) a trailing NOP+reloc pair per BO.
 */
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	block->flags = 0;
	block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	block->start_offset = reg[i].offset;
	/* PM4 header + register offset (dword-indexed, relative to the
	 * register file base for this opcode) */
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	/* block->reg aliases the n value slots inside the pm4 buffer */
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			/* add once to the context's enable/dirty lists */
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list,&ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			/* NOTE: nbo is pre-incremented, so reloc[] and
			 * pm4_bo_index[] use 1-based indices; index 0 is
			 * unused (matches the k=1..nbo loop in fini) */
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			/* NOP packet whose payload dword is patched with the
			 * relocation at emit time */
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
	}
	/* check that we stay in limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}
-
-int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
- unsigned opcode, unsigned offset_base)
-{
- struct r600_block *block;
- struct r600_range *range;
- int offset;
-
- for (unsigned i = 0, n = 0; i < nreg; i += n) {
- /* ignore new block balise */
- if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
- n = 1;
- continue;
- }
-
- /* register that need relocation are in their own group */
- /* find number of consecutive registers */
- n = 0;
- offset = reg[i].offset;
- while (reg[i + n].offset == offset) {
- n++;
- offset += 4;
- if ((n + i) >= nreg)
- break;
- if (n >= (R600_BLOCK_MAX_REG - 2))
- break;
- }
-
- /* allocate new block */
- block = calloc(1, sizeof(struct r600_block));
- if (block == NULL) {
- return -ENOMEM;
- }
- ctx->nblocks++;
- for (int j = 0; j < n; j++) {
- range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
- /* create block table if it doesn't exist */
- if (!range->blocks)
- range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
- if (!range->blocks) {
- free(block);
- return -1;
- }
-
- range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
- }
-
- r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
-
- }
- return 0;
-}
-
/* Context registers tracked by the block mechanism.
 *
 * Consecutive offsets are grouped into one block; GROUP_FORCE_NEW_BLOCK
 * entries force a block boundary (used to isolate BO-carrying registers).
 */
static const struct r600_reg r600_context_reg_list[] = {
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	/* VS output semantic ids */
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	/* VS shader program: its own block since it needs a BO reloc */
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	/* fetch shader */
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	/* PS input controls (one per interpolated input) */
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	/* PS shader program: its own block since it needs a BO reloc */
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
};
-
-/* initialize */
-void r600_context_fini(struct r600_context *ctx)
-{
- struct r600_block *block;
- struct r600_range *range;
-
- if (ctx->range) {
- for (int i = 0; i < NUM_RANGES; i++) {
- if (!ctx->range[i].blocks)
- continue;
- for (int j = 0; j < (1 << HASH_SHIFT); j++) {
- block = ctx->range[i].blocks[j];
- if (block) {
- for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
- range = &ctx->range[CTX_RANGE_ID(offset)];
- range->blocks[CTX_BLOCK_ID(offset)] = NULL;
- }
- for (int k = 1; k <= block->nbo; k++) {
- pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
- }
- free(block);
- }
- }
- free(ctx->range[i].blocks);
- }
- }
- free(ctx->blocks);
-}
-
-int r600_setup_block_table(struct r600_context *ctx)
-{
- /* setup block table */
- int c = 0;
- ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
- if (!ctx->blocks)
- return -ENOMEM;
- for (int i = 0; i < NUM_RANGES; i++) {
- if (!ctx->range[i].blocks)
- continue;
- for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
- if (!ctx->range[i].blocks[j])
- continue;
-
- add = 1;
- for (int k = 0; k < c; k++) {
- if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
- add = 0;
- break;
- }
- }
- if (add) {
- assert(c < ctx->nblocks);
- ctx->blocks[c++] = ctx->range[i].blocks[j];
- j += (ctx->range[i].blocks[j]->nreg) - 1;
- }
- }
- }
- return 0;
-}
-
-int r600_context_init(struct r600_context *ctx)
-{
- int r;
-
- /* add blocks */
- r = r600_context_add_block(ctx, r600_context_reg_list,
- Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
- if (r)
- goto out_err;
-
- r = r600_setup_block_table(ctx);
- if (r)
- goto out_err;
-
- ctx->max_db = 4;
- return 0;
-out_err:
- r600_context_fini(ctx);
- return r;
-}