radeon/llvm: Add live-in registers during DAG lowering
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_pm4.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "radeonsi_pipe.h"
#include "radeonsi_pm4.h"
#include "sid.h"
#include "r600_hw_context_priv.h"

#define NUMBER_OF_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))

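/* Low-level PM4 packet building: si_pm4_cmd_begin() reserves a dword for the
 * PKT3 header and remembers its position, si_pm4_cmd_add() appends payload
 * dwords, and si_pm4_cmd_end() patches the header with the final body size.
 * An illustrative sequence (not taken from this file) building one packet:
 *
 *   si_pm4_cmd_begin(state, PKT3_NUM_INSTANCES);
 *   si_pm4_cmd_add(state, 1);
 *   si_pm4_cmd_end(state, false);
 */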
void si_pm4_cmd_begin(struct si_pm4_state *state, unsigned opcode)
{
        state->last_opcode = opcode;
        state->last_pm4 = state->ndw++;
}

void si_pm4_cmd_add(struct si_pm4_state *state, uint32_t dw)
{
        state->pm4[state->ndw++] = dw;
}

void si_pm4_cmd_end(struct si_pm4_state *state, bool predicate)
{
        unsigned count;
        count = state->ndw - state->last_pm4 - 2;
        state->pm4[state->last_pm4] = PKT3(state->last_opcode,
                                           count, predicate);

        assert(state->ndw <= SI_PM4_MAX_DW);
}

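/* Emit a register write as the matching SET_(CONFIG|SH|CONTEXT)_REG packet.
 * Consecutive writes to adjacent registers with the same packet type are
 * merged into the previous packet by re-patching its header. */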
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
        unsigned opcode;

        if (reg >= SI_CONFIG_REG_OFFSET && reg <= SI_CONFIG_REG_END) {
                opcode = PKT3_SET_CONFIG_REG;
                reg -= SI_CONFIG_REG_OFFSET;

        } else if (reg >= SI_SH_REG_OFFSET && reg <= SI_SH_REG_END) {
                opcode = PKT3_SET_SH_REG;
                reg -= SI_SH_REG_OFFSET;

        } else if (reg >= SI_CONTEXT_REG_OFFSET && reg <= SI_CONTEXT_REG_END) {
                opcode = PKT3_SET_CONTEXT_REG;
                reg -= SI_CONTEXT_REG_OFFSET;
        } else {
                R600_ERR("Invalid register offset %08x!\n", reg);
                return;
        }

        reg >>= 2;

        if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
                si_pm4_cmd_begin(state, opcode);
                si_pm4_cmd_add(state, reg);
        }

        state->last_reg = reg;
        si_pm4_cmd_add(state, val);
        si_pm4_cmd_end(state, false);
}

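/* Track a buffer object used by this state so a relocation can be emitted
 * for it when the state is written to the command stream. */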
void si_pm4_add_bo(struct si_pm4_state *state,
                   struct si_resource *bo,
                   enum radeon_bo_usage usage)
{
        unsigned idx = state->nbo++;
        assert(idx < SI_PM4_MAX_BO);

        si_resource_reference(&state->bo[idx], bo);
        state->bo_usage[idx] = usage;
}

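/* The si_pm4_inval_*() helpers accumulate CP_COHER_CNTL flags in the state;
 * the combined flags of all dirty states are collected by si_pm4_sync_flags(). */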
void si_pm4_inval_shader_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        state->cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
}

void si_pm4_inval_texture_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
}

void si_pm4_inval_vertex_cache(struct si_pm4_state *state)
{
        /* Some GPUs don't have the vertex cache and must use the texture cache instead. */
        state->cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
}

void si_pm4_inval_fb_cache(struct si_pm4_state *state, unsigned nr_cbufs)
{
        state->cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1);
        state->cp_coher_cntl |= ((1 << nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT;
}

void si_pm4_inval_zsbuf_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
}

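/* Release a state: drop it from the emitted-state tracking (when idx is
 * valid), release its buffer references and free the memory. */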
void si_pm4_free_state(struct r600_context *rctx,
                       struct si_pm4_state *state,
                       unsigned idx)
{
        if (state == NULL)
                return;

        if (idx != ~0 && rctx->emitted.array[idx] == state) {
                rctx->emitted.array[idx] = NULL;
        }

        for (int i = 0; i < state->nbo; ++i) {
                si_resource_reference(&state->bo[i], NULL);
        }
        FREE(state);
}

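/* OR together the CP_COHER_CNTL flags of all queued states that have not
 * been emitted yet. */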
uint32_t si_pm4_sync_flags(struct r600_context *rctx)
{
        uint32_t cp_coher_cntl = 0;

        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                cp_coher_cntl |= state->cp_coher_cntl;
        }
        return cp_coher_cntl;
}

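/* Count the command stream dwords needed to emit all dirty states. */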
unsigned si_pm4_dirty_dw(struct r600_context *rctx)
{
        unsigned count = 0;

        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                count += state->ndw;
        }

        return count;
}

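/* Write one state to the command stream: emit relocations for its buffers,
 * then copy its PM4 dwords into the CS. */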
void si_pm4_emit(struct r600_context *rctx, struct si_pm4_state *state)
{
        struct radeon_winsys_cs *cs = rctx->cs;
        for (int i = 0; i < state->nbo; ++i) {
                r600_context_bo_reloc(rctx, state->bo[i],
                                      state->bo_usage[i]);
        }

        memcpy(&cs->buf[cs->cdw], state->pm4, state->ndw * 4);
        cs->cdw += state->ndw;
}

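/* Emit every queued state that differs from what was last emitted and
 * record it as the new emitted state. */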
void si_pm4_emit_dirty(struct r600_context *rctx)
{
        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                si_pm4_emit(rctx, state);
                rctx->emitted.array[i] = state;
        }
}

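/* Forget all emitted states so everything queued is emitted again,
 * e.g. after the command stream has been flushed. */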
void si_pm4_reset_emitted(struct r600_context *rctx)
{
        memset(&rctx->emitted, 0, sizeof(rctx->emitted));
}