/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
27 #include "radeon/r600_cs.h"
28 #include "util/u_memory.h"
32 #define NUMBER_OF_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
34 void si_pm4_cmd_begin(struct si_pm4_state
*state
, unsigned opcode
)
36 state
->last_opcode
= opcode
;
37 state
->last_pm4
= state
->ndw
++;
40 void si_pm4_cmd_add(struct si_pm4_state
*state
, uint32_t dw
)
42 state
->pm4
[state
->ndw
++] = dw
;
45 void si_pm4_cmd_end(struct si_pm4_state
*state
, bool predicate
)
48 count
= state
->ndw
- state
->last_pm4
- 2;
49 state
->pm4
[state
->last_pm4
] =
50 PKT3(state
->last_opcode
, count
, predicate
)
51 | PKT3_SHADER_TYPE_S(state
->compute_pkt
);
53 assert(state
->ndw
<= SI_PM4_MAX_DW
);
56 void si_pm4_set_reg(struct si_pm4_state
*state
, unsigned reg
, uint32_t val
)
60 if (reg
>= SI_CONFIG_REG_OFFSET
&& reg
< SI_CONFIG_REG_END
) {
61 opcode
= PKT3_SET_CONFIG_REG
;
62 reg
-= SI_CONFIG_REG_OFFSET
;
64 } else if (reg
>= SI_SH_REG_OFFSET
&& reg
< SI_SH_REG_END
) {
65 opcode
= PKT3_SET_SH_REG
;
66 reg
-= SI_SH_REG_OFFSET
;
68 } else if (reg
>= SI_CONTEXT_REG_OFFSET
&& reg
< SI_CONTEXT_REG_END
) {
69 opcode
= PKT3_SET_CONTEXT_REG
;
70 reg
-= SI_CONTEXT_REG_OFFSET
;
72 } else if (reg
>= CIK_UCONFIG_REG_OFFSET
&& reg
< CIK_UCONFIG_REG_END
) {
73 opcode
= PKT3_SET_UCONFIG_REG
;
74 reg
-= CIK_UCONFIG_REG_OFFSET
;
77 R600_ERR("Invalid register offset %08x!\n", reg
);
83 if (opcode
!= state
->last_opcode
|| reg
!= (state
->last_reg
+ 1)) {
84 si_pm4_cmd_begin(state
, opcode
);
85 si_pm4_cmd_add(state
, reg
);
88 state
->last_reg
= reg
;
89 si_pm4_cmd_add(state
, val
);
90 si_pm4_cmd_end(state
, false);
93 void si_pm4_add_bo(struct si_pm4_state
*state
,
94 struct r600_resource
*bo
,
95 enum radeon_bo_usage usage
,
96 enum radeon_bo_priority priority
)
98 unsigned idx
= state
->nbo
++;
99 assert(idx
< SI_PM4_MAX_BO
);
101 r600_resource_reference(&state
->bo
[idx
], bo
);
102 state
->bo_usage
[idx
] = usage
;
103 state
->bo_priority
[idx
] = priority
;
106 void si_pm4_clear_state(struct si_pm4_state
*state
)
108 for (int i
= 0; i
< state
->nbo
; ++i
)
109 r600_resource_reference(&state
->bo
[i
], NULL
);
110 r600_resource_reference(&state
->indirect_buffer
, NULL
);
/* Release all resources held by @state and free the state object itself.
 *
 * Fix: the truncated source only cleared the state and leaked the
 * allocation; FREE() restores the matching deallocation. */
void si_pm4_free_state_simple(struct si_pm4_state *state)
{
	si_pm4_clear_state(state);
	FREE(state);
}
121 void si_pm4_free_state(struct si_context
*sctx
,
122 struct si_pm4_state
*state
,
128 if (idx
!= ~0 && sctx
->emitted
.array
[idx
] == state
) {
129 sctx
->emitted
.array
[idx
] = NULL
;
132 si_pm4_free_state_simple(state
);
135 void si_pm4_emit(struct si_context
*sctx
, struct si_pm4_state
*state
)
137 struct radeon_winsys_cs
*cs
= sctx
->b
.gfx
.cs
;
139 for (int i
= 0; i
< state
->nbo
; ++i
) {
140 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, state
->bo
[i
],
141 state
->bo_usage
[i
], state
->bo_priority
[i
]);
144 if (!state
->indirect_buffer
) {
145 radeon_emit_array(cs
, state
->pm4
, state
->ndw
);
147 struct r600_resource
*ib
= state
->indirect_buffer
;
149 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, ib
,
153 radeon_emit(cs
, PKT3(PKT3_INDIRECT_BUFFER_CIK
, 2, 0));
154 radeon_emit(cs
, ib
->gpu_address
);
155 radeon_emit(cs
, (ib
->gpu_address
>> 32) & 0xffff);
156 radeon_emit(cs
, (ib
->b
.b
.width0
>> 2) & 0xfffff);
160 void si_pm4_emit_dirty(struct si_context
*sctx
)
162 for (int i
= 0; i
< NUMBER_OF_STATES
; ++i
) {
163 struct si_pm4_state
*state
= sctx
->queued
.array
[i
];
165 if (!state
|| sctx
->emitted
.array
[i
] == state
)
168 si_pm4_emit(sctx
, state
);
169 sctx
->emitted
.array
[i
] = state
;
173 void si_pm4_reset_emitted(struct si_context
*sctx
)
175 memset(&sctx
->emitted
, 0, sizeof(sctx
->emitted
));
178 void si_pm4_upload_indirect_buffer(struct si_context
*sctx
,
179 struct si_pm4_state
*state
)
181 struct pipe_screen
*screen
= sctx
->b
.b
.screen
;
182 unsigned aligned_ndw
= align(state
->ndw
, 8);
184 /* only supported on CIK and later */
185 if (sctx
->b
.chip_class
< CIK
)
189 assert(aligned_ndw
<= SI_PM4_MAX_DW
);
191 r600_resource_reference(&state
->indirect_buffer
, NULL
);
192 state
->indirect_buffer
= (struct r600_resource
*)
193 pipe_buffer_create(screen
, 0,
194 PIPE_USAGE_DEFAULT
, aligned_ndw
* 4);
195 if (!state
->indirect_buffer
)
198 /* Pad the IB to 8 DWs to meet CP fetch alignment requirements. */
199 if (sctx
->screen
->b
.info
.gfx_ib_pad_with_type2
) {
200 for (int i
= state
->ndw
; i
< aligned_ndw
; i
++)
201 state
->pm4
[i
] = 0x80000000; /* type2 nop packet */
203 for (int i
= state
->ndw
; i
< aligned_ndw
; i
++)
204 state
->pm4
[i
] = 0xffff1000; /* type3 nop packet */
207 pipe_buffer_write(&sctx
->b
.b
, &state
->indirect_buffer
->b
.b
,
208 0, aligned_ndw
*4, state
->pm4
);