/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
27 #include "radeon/r600_cs.h"
28 #include "util/u_memory.h"
32 void si_pm4_cmd_begin(struct si_pm4_state
*state
, unsigned opcode
)
34 state
->last_opcode
= opcode
;
35 state
->last_pm4
= state
->ndw
++;
38 void si_pm4_cmd_add(struct si_pm4_state
*state
, uint32_t dw
)
40 state
->pm4
[state
->ndw
++] = dw
;
43 void si_pm4_cmd_end(struct si_pm4_state
*state
, bool predicate
)
46 count
= state
->ndw
- state
->last_pm4
- 2;
47 state
->pm4
[state
->last_pm4
] =
48 PKT3(state
->last_opcode
, count
, predicate
)
49 | PKT3_SHADER_TYPE_S(state
->compute_pkt
);
51 assert(state
->ndw
<= SI_PM4_MAX_DW
);
54 void si_pm4_set_reg(struct si_pm4_state
*state
, unsigned reg
, uint32_t val
)
58 if (reg
>= SI_CONFIG_REG_OFFSET
&& reg
< SI_CONFIG_REG_END
) {
59 opcode
= PKT3_SET_CONFIG_REG
;
60 reg
-= SI_CONFIG_REG_OFFSET
;
62 } else if (reg
>= SI_SH_REG_OFFSET
&& reg
< SI_SH_REG_END
) {
63 opcode
= PKT3_SET_SH_REG
;
64 reg
-= SI_SH_REG_OFFSET
;
66 } else if (reg
>= SI_CONTEXT_REG_OFFSET
&& reg
< SI_CONTEXT_REG_END
) {
67 opcode
= PKT3_SET_CONTEXT_REG
;
68 reg
-= SI_CONTEXT_REG_OFFSET
;
70 } else if (reg
>= CIK_UCONFIG_REG_OFFSET
&& reg
< CIK_UCONFIG_REG_END
) {
71 opcode
= PKT3_SET_UCONFIG_REG
;
72 reg
-= CIK_UCONFIG_REG_OFFSET
;
75 R600_ERR("Invalid register offset %08x!\n", reg
);
81 if (opcode
!= state
->last_opcode
|| reg
!= (state
->last_reg
+ 1)) {
82 si_pm4_cmd_begin(state
, opcode
);
83 si_pm4_cmd_add(state
, reg
);
86 state
->last_reg
= reg
;
87 si_pm4_cmd_add(state
, val
);
88 si_pm4_cmd_end(state
, false);
91 void si_pm4_add_bo(struct si_pm4_state
*state
,
92 struct r600_resource
*bo
,
93 enum radeon_bo_usage usage
,
94 enum radeon_bo_priority priority
)
96 unsigned idx
= state
->nbo
++;
97 assert(idx
< SI_PM4_MAX_BO
);
99 r600_resource_reference(&state
->bo
[idx
], bo
);
100 state
->bo_usage
[idx
] = usage
;
101 state
->bo_priority
[idx
] = priority
;
104 void si_pm4_clear_state(struct si_pm4_state
*state
)
106 for (int i
= 0; i
< state
->nbo
; ++i
)
107 r600_resource_reference(&state
->bo
[i
], NULL
);
108 r600_resource_reference(&state
->indirect_buffer
, NULL
);
113 void si_pm4_free_state(struct si_context
*sctx
,
114 struct si_pm4_state
*state
,
120 if (idx
!= ~0 && sctx
->emitted
.array
[idx
] == state
) {
121 sctx
->emitted
.array
[idx
] = NULL
;
124 si_pm4_clear_state(state
);
128 void si_pm4_emit(struct si_context
*sctx
, struct si_pm4_state
*state
)
130 struct radeon_winsys_cs
*cs
= sctx
->b
.gfx
.cs
;
132 for (int i
= 0; i
< state
->nbo
; ++i
) {
133 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, state
->bo
[i
],
134 state
->bo_usage
[i
], state
->bo_priority
[i
]);
137 if (!state
->indirect_buffer
) {
138 radeon_emit_array(cs
, state
->pm4
, state
->ndw
);
140 struct r600_resource
*ib
= state
->indirect_buffer
;
142 radeon_add_to_buffer_list(&sctx
->b
, &sctx
->b
.gfx
, ib
,
146 radeon_emit(cs
, PKT3(PKT3_INDIRECT_BUFFER_CIK
, 2, 0));
147 radeon_emit(cs
, ib
->gpu_address
);
148 radeon_emit(cs
, ib
->gpu_address
>> 32);
149 radeon_emit(cs
, (ib
->b
.b
.width0
>> 2) & 0xfffff);
153 void si_pm4_reset_emitted(struct si_context
*sctx
)
155 memset(&sctx
->emitted
, 0, sizeof(sctx
->emitted
));
156 sctx
->dirty_states
|= u_bit_consecutive(0, SI_NUM_STATES
);
159 void si_pm4_upload_indirect_buffer(struct si_context
*sctx
,
160 struct si_pm4_state
*state
)
162 struct pipe_screen
*screen
= sctx
->b
.b
.screen
;
163 unsigned aligned_ndw
= align(state
->ndw
, 8);
165 /* only supported on CIK and later */
166 if (sctx
->b
.chip_class
< CIK
)
170 assert(aligned_ndw
<= SI_PM4_MAX_DW
);
172 r600_resource_reference(&state
->indirect_buffer
, NULL
);
173 state
->indirect_buffer
= (struct r600_resource
*)
174 pipe_buffer_create(screen
, 0,
175 PIPE_USAGE_DEFAULT
, aligned_ndw
* 4);
176 if (!state
->indirect_buffer
)
179 /* Pad the IB to 8 DWs to meet CP fetch alignment requirements. */
180 if (sctx
->screen
->b
.info
.gfx_ib_pad_with_type2
) {
181 for (int i
= state
->ndw
; i
< aligned_ndw
; i
++)
182 state
->pm4
[i
] = 0x80000000; /* type2 nop packet */
184 for (int i
= state
->ndw
; i
< aligned_ndw
; i
++)
185 state
->pm4
[i
] = 0xffff1000; /* type3 nop packet */
188 pipe_buffer_write(&sctx
->b
.b
, &state
->indirect_buffer
->b
.b
,
189 0, aligned_ndw
*4, state
->pm4
);