/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
-#include "radeon/r600_cs.h"
-#include "util/u_memory.h"
#include "si_pipe.h"
#include "sid.h"
-
-#define NUMBER_OF_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
+#include "util/u_memory.h"
/* Start a new PKT3 command.
 *
 * Remembers the opcode and reserves one dword for the packet header;
 * the header itself is written later by si_pm4_cmd_end() once the
 * dword count is known.
 */
void si_pm4_cmd_begin(struct si_pm4_state *state, unsigned opcode)
{
   state->last_opcode = opcode;
   state->last_pm4 = state->ndw++;
}
/* Append one dword to the command currently being built. */
void si_pm4_cmd_add(struct si_pm4_state *state, uint32_t dw)
{
   state->pm4[state->ndw++] = dw;
}
/* Finish the current command: patch the PKT3 header (reserved by
 * si_pm4_cmd_begin) with the final dword count.  `predicate` sets the
 * packet's predication bit.
 */
void si_pm4_cmd_end(struct si_pm4_state *state, bool predicate)
{
   unsigned count;
   count = state->ndw - state->last_pm4 - 2;
   state->pm4[state->last_pm4] = PKT3(state->last_opcode, count, predicate);

   assert(state->ndw <= SI_PM4_MAX_DW);
}
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
- unsigned opcode;
+ unsigned opcode;
- if (reg >= SI_CONFIG_REG_OFFSET && reg < SI_CONFIG_REG_END) {
- opcode = PKT3_SET_CONFIG_REG;
- reg -= SI_CONFIG_REG_OFFSET;
+ if (reg >= SI_CONFIG_REG_OFFSET && reg < SI_CONFIG_REG_END) {
+ opcode = PKT3_SET_CONFIG_REG;
+ reg -= SI_CONFIG_REG_OFFSET;
- } else if (reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END) {
- opcode = PKT3_SET_SH_REG;
- reg -= SI_SH_REG_OFFSET;
+ } else if (reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END) {
+ opcode = PKT3_SET_SH_REG;
+ reg -= SI_SH_REG_OFFSET;
- } else if (reg >= SI_CONTEXT_REG_OFFSET && reg < SI_CONTEXT_REG_END) {
- opcode = PKT3_SET_CONTEXT_REG;
- reg -= SI_CONTEXT_REG_OFFSET;
+ } else if (reg >= SI_CONTEXT_REG_OFFSET && reg < SI_CONTEXT_REG_END) {
+ opcode = PKT3_SET_CONTEXT_REG;
+ reg -= SI_CONTEXT_REG_OFFSET;
- } else if (reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END) {
- opcode = PKT3_SET_UCONFIG_REG;
- reg -= CIK_UCONFIG_REG_OFFSET;
+ } else if (reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END) {
+ opcode = PKT3_SET_UCONFIG_REG;
+ reg -= CIK_UCONFIG_REG_OFFSET;
- } else {
- R600_ERR("Invalid register offset %08x!\n", reg);
- return;
- }
+ } else {
+ PRINT_ERR("Invalid register offset %08x!\n", reg);
+ return;
+ }
- reg >>= 2;
+ reg >>= 2;
- if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
- si_pm4_cmd_begin(state, opcode);
- si_pm4_cmd_add(state, reg);
- }
+ if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
+ si_pm4_cmd_begin(state, opcode);
+ si_pm4_cmd_add(state, reg);
+ }
- state->last_reg = reg;
- si_pm4_cmd_add(state, val);
- si_pm4_cmd_end(state, false);
+ state->last_reg = reg;
+ si_pm4_cmd_add(state, val);
+ si_pm4_cmd_end(state, false);
}
-void si_pm4_add_bo(struct si_pm4_state *state,
- struct r600_resource *bo,
- enum radeon_bo_usage usage,
- enum radeon_bo_priority priority)
+void si_pm4_add_bo(struct si_pm4_state *state, struct si_resource *bo, enum radeon_bo_usage usage,
+ enum radeon_bo_priority priority)
{
- unsigned idx = state->nbo++;
- assert(idx < SI_PM4_MAX_BO);
+ unsigned idx = state->nbo++;
+ assert(idx < SI_PM4_MAX_BO);
- r600_resource_reference(&state->bo[idx], bo);
- state->bo_usage[idx] = usage;
- state->bo_priority[idx] = priority;
+ si_resource_reference(&state->bo[idx], bo);
+ state->bo_usage[idx] = usage;
+ state->bo_priority[idx] = priority;
}
-void si_pm4_free_state(struct si_context *sctx,
- struct si_pm4_state *state,
- unsigned idx)
+void si_pm4_clear_state(struct si_pm4_state *state)
{
- if (state == NULL)
- return;
-
- if (idx != ~0 && sctx->emitted.array[idx] == state) {
- sctx->emitted.array[idx] = NULL;
- }
-
- for (int i = 0; i < state->nbo; ++i) {
- r600_resource_reference(&state->bo[i], NULL);
- }
- FREE(state);
+ for (int i = 0; i < state->nbo; ++i)
+ si_resource_reference(&state->bo[i], NULL);
+ si_resource_reference(&state->indirect_buffer, NULL);
+ state->nbo = 0;
+ state->ndw = 0;
}
-unsigned si_pm4_dirty_dw(struct si_context *sctx)
+void si_pm4_free_state(struct si_context *sctx, struct si_pm4_state *state, unsigned idx)
{
- unsigned count = 0;
+ if (!state)
+ return;
- for (int i = 0; i < NUMBER_OF_STATES; ++i) {
- struct si_pm4_state *state = sctx->queued.array[i];
+ if (idx != ~0 && sctx->emitted.array[idx] == state) {
+ sctx->emitted.array[idx] = NULL;
+ }
- if (!state || sctx->emitted.array[i] == state)
- continue;
-
- count += state->ndw;
-#if SI_TRACE_CS
- /* for tracing each states */
- if (sctx->screen->b.trace_bo) {
- count += SI_TRACE_CS_DWORDS;
- }
-#endif
- }
-
- return count;
+ si_pm4_clear_state(state);
+ FREE(state);
}
void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
- for (int i = 0; i < state->nbo; ++i) {
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, state->bo[i],
- state->bo_usage[i], state->bo_priority[i]);
- }
+ for (int i = 0; i < state->nbo; ++i) {
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, state->bo[i], state->bo_usage[i],
+ state->bo_priority[i]);
+ }
- memcpy(&cs->buf[cs->cdw], state->pm4, state->ndw * 4);
- cs->cdw += state->ndw;
+ if (!state->indirect_buffer) {
+ radeon_emit_array(cs, state->pm4, state->ndw);
+ } else {
+ struct si_resource *ib = state->indirect_buffer;
-#if SI_TRACE_CS
- if (sctx->screen->b.trace_bo) {
- si_trace_emit(sctx);
- }
-#endif
-}
-
-void si_pm4_emit_dirty(struct si_context *sctx)
-{
- for (int i = 0; i < NUMBER_OF_STATES; ++i) {
- struct si_pm4_state *state = sctx->queued.array[i];
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, ib, RADEON_USAGE_READ, RADEON_PRIO_IB2);
- if (!state || sctx->emitted.array[i] == state)
- continue;
+ radeon_emit(cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
+ radeon_emit(cs, ib->gpu_address);
+ radeon_emit(cs, ib->gpu_address >> 32);
+ radeon_emit(cs, (ib->b.b.width0 >> 2) & 0xfffff);
+ }
- assert(state != sctx->queued.named.init);
- si_pm4_emit(sctx, state);
- sctx->emitted.array[i] = state;
- }
+ if (state->atom.emit)
+ state->atom.emit(sctx);
}
/* Forget which states have been emitted (e.g. after a CS flush) and
 * mark every state slot dirty so they are all re-emitted. */
void si_pm4_reset_emitted(struct si_context *sctx)
{
   memset(&sctx->emitted, 0, sizeof(sctx->emitted));
   sctx->dirty_states |= u_bit_consecutive(0, SI_NUM_STATES);
}
-void si_pm4_cleanup(struct si_context *sctx)
+void si_pm4_upload_indirect_buffer(struct si_context *sctx, struct si_pm4_state *state)
{
- for (int i = 0; i < NUMBER_OF_STATES; ++i) {
- si_pm4_free_state(sctx, sctx->queued.array[i], i);
- }
+ struct pipe_screen *screen = sctx->b.screen;
+ unsigned aligned_ndw = align(state->ndw, 8);
+
+ /* only supported on GFX7 and later */
+ if (sctx->chip_class < GFX7)
+ return;
+
+ assert(state->ndw);
+ assert(aligned_ndw <= SI_PM4_MAX_DW);
+
+ si_resource_reference(&state->indirect_buffer, NULL);
+ /* TODO: this hangs with 1024 or higher alignment on GFX9. */
+ state->indirect_buffer =
+ si_aligned_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, aligned_ndw * 4, 256);
+ if (!state->indirect_buffer)
+ return;
+
+ /* Pad the IB to 8 DWs to meet CP fetch alignment requirements. */
+ if (sctx->screen->info.gfx_ib_pad_with_type2) {
+ for (int i = state->ndw; i < aligned_ndw; i++)
+ state->pm4[i] = 0x80000000; /* type2 nop packet */
+ } else {
+ for (int i = state->ndw; i < aligned_ndw; i++)
+ state->pm4[i] = 0xffff1000; /* type3 nop packet */
+ }
+
+ pipe_buffer_write(&sctx->b, &state->indirect_buffer->b.b, 0, aligned_ndw * 4, state->pm4);
}