/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <pipe/p_compiler.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>
#include <pipebuffer/pb_bufmgr.h>
#include "xf86drm.h"
#include "radeon_drm.h"
#include "r600_priv.h"
#include "bof.h"
#include "r600d.h"

#define GROUP_FORCE_NEW_BLOCK   0

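/* Stamp every BO referenced by the current submission with the CS fence
 * value and move it onto the context's fenced list, so completion can be
 * detected later by comparing against the last fence the GPU signaled.
 */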
static INLINE void r600_context_update_fenced_list(struct r600_context *ctx)
{
        for (int i = 0; i < ctx->creloc; i++) {
                if (!LIST_IS_EMPTY(&ctx->bo[i]->fencedlist))
                        LIST_DELINIT(&ctx->bo[i]->fencedlist);
                LIST_ADDTAIL(&ctx->bo[i]->fencedlist, &ctx->fenced_bo);
                ctx->bo[i]->fence = ctx->radeon->fence;
                ctx->bo[i]->ctx = ctx;
        }
}

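/* Fence values are 32-bit and eventually wrap around. When that happens,
 * drop BOs whose fence has already signaled and restamp the remaining
 * ones with the post-wrap fence value.
 */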
static INLINE void r600_context_fence_wraparound(struct r600_context *ctx, unsigned fence)
{
        struct radeon_bo *bo = NULL;
        struct radeon_bo *tmp;

        LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
                if (bo->fence <= *ctx->radeon->cfence) {
                        LIST_DELINIT(&bo->fencedlist);
                        bo->fence = 0;
                } else {
                        bo->fence = fence;
                }
        }
}

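/* Group runs of consecutive registers from 'reg' into r600_block
 * structures (at most R600_BLOCK_MAX_REG registers each), pre-build the
 * SET_* PM4 packet for each block, and index the block in the per-range
 * hash table under every register offset it covers. A register that
 * needs a BO relocation gets an extra NOP dword reserved for the reloc.
 */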
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
                           unsigned opcode, unsigned offset_base)
{
        struct r600_block *block;
        struct r600_range *range;
        int offset;

        for (unsigned i = 0, n = 0; i < nreg; i += n) {
                u32 j;

                /* skip the new-block marker */
                if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
                        n = 1;
                        continue;
                }

                /* registers that need relocation are in their own group */
                /* find the number of consecutive registers */
                n = 0;
                offset = reg[i].offset;
                while (reg[i + n].offset == offset) {
                        n++;
                        offset += 4;
                        if ((n + i) >= nreg)
                                break;
                        if (n >= (R600_BLOCK_MAX_REG - 2))
                                break;
                }

                /* allocate new block */
                block = calloc(1, sizeof(struct r600_block));
                if (block == NULL) {
                        return -ENOMEM;
                }
                ctx->nblocks++;
                for (int j = 0; j < n; j++) {
                        range = &ctx->range[CTX_RANGE_ID(ctx, reg[i + j].offset)];
                        /* create block table if it doesn't exist */
                        if (!range->blocks)
                                range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
                        if (!range->blocks)
                                return -1;

                        range->blocks[CTX_BLOCK_ID(ctx, reg[i + j].offset)] = block;
                }

                /* initialize block */
                block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
                block->start_offset = reg[i].offset;
                block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
                block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
                block->reg = &block->pm4[block->pm4_ndwords];
                block->pm4_ndwords += n;
                block->nreg = n;
                block->nreg_dirty = n;
                block->flags = 0;
                LIST_INITHEAD(&block->list);

                for (j = 0; j < n; j++) {
                        if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
                                block->flags |= REG_FLAG_DIRTY_ALWAYS;
                        }
                        if (reg[i+j].flags & REG_FLAG_NEED_BO) {
                                block->nbo++;
                                assert(block->nbo < R600_BLOCK_MAX_BO);
                                block->pm4_bo_index[j] = block->nbo;
                                block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
                                block->pm4[block->pm4_ndwords++] = 0x00000000;
                                if (reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
                                        block->reloc[block->nbo].flush_flags = 0;
                                        block->reloc[block->nbo].flush_mask = 0;
                                } else {
                                        block->reloc[block->nbo].flush_flags = reg[i+j].flush_flags;
                                        block->reloc[block->nbo].flush_mask = reg[i+j].flush_mask;
                                }
                                block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
                        }
                        if ((ctx->radeon->family > CHIP_R600) &&
                            (ctx->radeon->family < CHIP_RV770) && reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
                                block->pm4[block->pm4_ndwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
                                block->pm4[block->pm4_ndwords++] = reg[i+j].flush_flags;
                        }
                }
                for (j = 0; j < n; j++) {
                        if (reg[i+j].flush_flags) {
                                block->pm4_flush_ndwords += 7;
                        }
                }
                /* check that we stay within the limit */
                assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
        }
        return 0;
}

/* R600/R700 configuration */
static const struct r600_reg r600_config_reg_list[] = {
        {R_008958_VGT_PRIMITIVE_TYPE, 0, 0, 0},
        {R_008C00_SQ_CONFIG, 0, 0, 0},
        {R_008C04_SQ_GPR_RESOURCE_MGMT_1, 0, 0, 0},
        {R_008C08_SQ_GPR_RESOURCE_MGMT_2, 0, 0, 0},
        {R_008C0C_SQ_THREAD_RESOURCE_MGMT, 0, 0, 0},
        {R_008C10_SQ_STACK_RESOURCE_MGMT_1, 0, 0, 0},
        {R_008C14_SQ_STACK_RESOURCE_MGMT_2, 0, 0, 0},
        {R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0, 0, 0},
        {R_009508_TA_CNTL_AUX, 0, 0, 0},
        {R_009714_VC_ENHANCE, 0, 0, 0},
        {R_009830_DB_DEBUG, 0, 0, 0},
        {R_009838_DB_WATERMARKS, 0, 0, 0},
};

static const struct r600_reg r600_ctl_const_list[] = {
        {R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0, 0},
        {R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0, 0},
};

static const struct r600_reg r600_context_reg_list[] = {
        {R_028350_SX_MISC, 0, 0, 0},
        {R_0286C8_SPI_THREAD_GROUPING, 0, 0, 0},
        {R_0288A8_SQ_ESGS_RING_ITEMSIZE, 0, 0, 0},
        {R_0288AC_SQ_GSVS_RING_ITEMSIZE, 0, 0, 0},
        {R_0288B0_SQ_ESTMP_RING_ITEMSIZE, 0, 0, 0},
        {R_0288B4_SQ_GSTMP_RING_ITEMSIZE, 0, 0, 0},
        {R_0288B8_SQ_VSTMP_RING_ITEMSIZE, 0, 0, 0},
        {R_0288BC_SQ_PSTMP_RING_ITEMSIZE, 0, 0, 0},
        {R_0288C0_SQ_FBUF_RING_ITEMSIZE, 0, 0, 0},
        {R_0288C4_SQ_REDUC_RING_ITEMSIZE, 0, 0, 0},
        {R_0288C8_SQ_GS_VERT_ITEMSIZE, 0, 0, 0},
        {R_028A10_VGT_OUTPUT_PATH_CNTL, 0, 0, 0},
        {R_028A14_VGT_HOS_CNTL, 0, 0, 0},
        {R_028A18_VGT_HOS_MAX_TESS_LEVEL, 0, 0, 0},
        {R_028A1C_VGT_HOS_MIN_TESS_LEVEL, 0, 0, 0},
        {R_028A20_VGT_HOS_REUSE_DEPTH, 0, 0, 0},
        {R_028A24_VGT_GROUP_PRIM_TYPE, 0, 0, 0},
        {R_028A28_VGT_GROUP_FIRST_DECR, 0, 0, 0},
        {R_028A2C_VGT_GROUP_DECR, 0, 0, 0},
        {R_028A30_VGT_GROUP_VECT_0_CNTL, 0, 0, 0},
        {R_028A34_VGT_GROUP_VECT_1_CNTL, 0, 0, 0},
        {R_028A38_VGT_GROUP_VECT_0_FMT_CNTL, 0, 0, 0},
        {R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL, 0, 0, 0},
        {R_028A40_VGT_GS_MODE, 0, 0, 0},
        {R_028A4C_PA_SC_MODE_CNTL, 0, 0, 0},
        {R_028AB0_VGT_STRMOUT_EN, 0, 0, 0},
        {R_028AB4_VGT_REUSE_OFF, 0, 0, 0},
        {R_028AB8_VGT_VTX_CNT_EN, 0, 0, 0},
        {R_028B20_VGT_STRMOUT_BUFFER_EN, 0, 0, 0},
        {R_028028_DB_STENCIL_CLEAR, 0, 0, 0},
        {R_02802C_DB_DEPTH_CLEAR, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028040_CB_COLOR0_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(0), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280A0_CB_COLOR0_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028060_CB_COLOR0_SIZE, 0, 0, 0},
        {R_028080_CB_COLOR0_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280E0_CB_COLOR0_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280C0_CB_COLOR0_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028100_CB_COLOR0_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028044_CB_COLOR1_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(1), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280A4_CB_COLOR1_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028064_CB_COLOR1_SIZE, 0, 0, 0},
        {R_028084_CB_COLOR1_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280E4_CB_COLOR1_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280C4_CB_COLOR1_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028104_CB_COLOR1_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028048_CB_COLOR2_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(2), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280A8_CB_COLOR2_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028068_CB_COLOR2_SIZE, 0, 0, 0},
        {R_028088_CB_COLOR2_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280E8_CB_COLOR2_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280C8_CB_COLOR2_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028108_CB_COLOR2_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_02804C_CB_COLOR3_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(3), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280AC_CB_COLOR3_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_02806C_CB_COLOR3_SIZE, 0, 0, 0},
        {R_02808C_CB_COLOR3_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280EC_CB_COLOR3_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280CC_CB_COLOR3_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_02810C_CB_COLOR3_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028050_CB_COLOR4_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(4), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280B0_CB_COLOR4_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028070_CB_COLOR4_SIZE, 0, 0, 0},
        {R_028090_CB_COLOR4_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280F0_CB_COLOR4_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280D0_CB_COLOR4_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028110_CB_COLOR4_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028054_CB_COLOR5_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(5), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280B4_CB_COLOR5_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028074_CB_COLOR5_SIZE, 0, 0, 0},
        {R_028094_CB_COLOR5_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280F4_CB_COLOR5_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280D4_CB_COLOR5_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028114_CB_COLOR5_MASK, 0, 0, 0},
        {R_028058_CB_COLOR6_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(6), 0},
        {R_0280B8_CB_COLOR6_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_028078_CB_COLOR6_SIZE, 0, 0, 0},
        {R_028098_CB_COLOR6_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280F8_CB_COLOR6_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280D8_CB_COLOR6_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_028118_CB_COLOR6_MASK, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_02805C_CB_COLOR7_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(7), 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0280BC_CB_COLOR7_INFO, REG_FLAG_NEED_BO, 0, 0xFFFFFFFF},
        {R_02807C_CB_COLOR7_SIZE, 0, 0, 0},
        {R_02809C_CB_COLOR7_VIEW, 0, 0, 0},
        {R_0280FC_CB_COLOR7_FRAG, REG_FLAG_NEED_BO, 0, 0},
        {R_0280DC_CB_COLOR7_TILE, REG_FLAG_NEED_BO, 0, 0},
        {R_02811C_CB_COLOR7_MASK, 0, 0, 0},
        {R_028120_CB_CLEAR_RED, 0, 0, 0},
        {R_028124_CB_CLEAR_GREEN, 0, 0, 0},
        {R_028128_CB_CLEAR_BLUE, 0, 0, 0},
        {R_02812C_CB_CLEAR_ALPHA, 0, 0, 0},
        {R_028140_ALU_CONST_BUFFER_SIZE_PS_0, REG_FLAG_DIRTY_ALWAYS, 0, 0},
        {R_028180_ALU_CONST_BUFFER_SIZE_VS_0, REG_FLAG_DIRTY_ALWAYS, 0, 0},
        {R_028940_ALU_CONST_CACHE_PS_0, REG_FLAG_NEED_BO, S_0085F0_SH_ACTION_ENA(1), 0xFFFFFFFF},
        {R_028980_ALU_CONST_CACHE_VS_0, REG_FLAG_NEED_BO, S_0085F0_SH_ACTION_ENA(1), 0xFFFFFFFF},
        {R_02823C_CB_SHADER_MASK, 0, 0, 0},
        {R_028238_CB_TARGET_MASK, 0, 0, 0},
        {R_028410_SX_ALPHA_TEST_CONTROL, 0, 0, 0},
        {R_028414_CB_BLEND_RED, 0, 0, 0},
        {R_028418_CB_BLEND_GREEN, 0, 0, 0},
        {R_02841C_CB_BLEND_BLUE, 0, 0, 0},
        {R_028420_CB_BLEND_ALPHA, 0, 0, 0},
        {R_028424_CB_FOG_RED, 0, 0, 0},
        {R_028428_CB_FOG_GREEN, 0, 0, 0},
        {R_02842C_CB_FOG_BLUE, 0, 0, 0},
        {R_028430_DB_STENCILREFMASK, 0, 0, 0},
        {R_028434_DB_STENCILREFMASK_BF, 0, 0, 0},
        {R_028438_SX_ALPHA_REF, 0, 0, 0},
        {R_0286DC_SPI_FOG_CNTL, 0, 0, 0},
        {R_0286E0_SPI_FOG_FUNC_SCALE, 0, 0, 0},
        {R_0286E4_SPI_FOG_FUNC_BIAS, 0, 0, 0},
        {R_028780_CB_BLEND0_CONTROL, 0, 0, 0},
        {R_028784_CB_BLEND1_CONTROL, 0, 0, 0},
        {R_028788_CB_BLEND2_CONTROL, 0, 0, 0},
        {R_02878C_CB_BLEND3_CONTROL, 0, 0, 0},
        {R_028790_CB_BLEND4_CONTROL, 0, 0, 0},
        {R_028794_CB_BLEND5_CONTROL, 0, 0, 0},
        {R_028798_CB_BLEND6_CONTROL, 0, 0, 0},
        {R_02879C_CB_BLEND7_CONTROL, 0, 0, 0},
        {R_0287A0_CB_SHADER_CONTROL, 0, 0, 0},
        {R_028800_DB_DEPTH_CONTROL, 0, 0, 0},
        {R_028804_CB_BLEND_CONTROL, 0, 0, 0},
        {R_028808_CB_COLOR_CONTROL, 0, 0, 0},
        {R_02880C_DB_SHADER_CONTROL, 0, 0, 0},
        {R_028C04_PA_SC_AA_CONFIG, 0, 0, 0},
        {R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 0, 0, 0},
        {R_028C20_PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, 0, 0, 0},
        {R_028C30_CB_CLRCMP_CONTROL, 0, 0, 0},
        {R_028C34_CB_CLRCMP_SRC, 0, 0, 0},
        {R_028C38_CB_CLRCMP_DST, 0, 0, 0},
        {R_028C3C_CB_CLRCMP_MSK, 0, 0, 0},
        {R_028C48_PA_SC_AA_MASK, 0, 0, 0},
        {R_028D2C_DB_SRESULTS_COMPARE_STATE1, 0, 0, 0},
        {R_028D44_DB_ALPHA_TO_MASK, 0, 0, 0},
        {R_02800C_DB_DEPTH_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_DEPTH, 0},
        {R_028000_DB_DEPTH_SIZE, 0, 0, 0},
        {R_028004_DB_DEPTH_VIEW, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028010_DB_DEPTH_INFO, REG_FLAG_NEED_BO, 0, 0},
        {R_028D0C_DB_RENDER_CONTROL, 0, 0, 0},
        {R_028D10_DB_RENDER_OVERRIDE, 0, 0, 0},
        {R_028D24_DB_HTILE_SURFACE, 0, 0, 0},
        {R_028D30_DB_PRELOAD_CONTROL, 0, 0, 0},
        {R_028D34_DB_PREFETCH_LIMIT, 0, 0, 0},
        {R_028030_PA_SC_SCREEN_SCISSOR_TL, 0, 0, 0},
        {R_028034_PA_SC_SCREEN_SCISSOR_BR, 0, 0, 0},
        {R_028200_PA_SC_WINDOW_OFFSET, 0, 0, 0},
        {R_028204_PA_SC_WINDOW_SCISSOR_TL, 0, 0, 0},
        {R_028208_PA_SC_WINDOW_SCISSOR_BR, 0, 0, 0},
        {R_02820C_PA_SC_CLIPRECT_RULE, 0, 0, 0},
        {R_028210_PA_SC_CLIPRECT_0_TL, 0, 0, 0},
        {R_028214_PA_SC_CLIPRECT_0_BR, 0, 0, 0},
        {R_028218_PA_SC_CLIPRECT_1_TL, 0, 0, 0},
        {R_02821C_PA_SC_CLIPRECT_1_BR, 0, 0, 0},
        {R_028220_PA_SC_CLIPRECT_2_TL, 0, 0, 0},
        {R_028224_PA_SC_CLIPRECT_2_BR, 0, 0, 0},
        {R_028228_PA_SC_CLIPRECT_3_TL, 0, 0, 0},
        {R_02822C_PA_SC_CLIPRECT_3_BR, 0, 0, 0},
        {R_028230_PA_SC_EDGERULE, 0, 0, 0},
        {R_028240_PA_SC_GENERIC_SCISSOR_TL, 0, 0, 0},
        {R_028244_PA_SC_GENERIC_SCISSOR_BR, 0, 0, 0},
        {R_028250_PA_SC_VPORT_SCISSOR_0_TL, 0, 0, 0},
        {R_028254_PA_SC_VPORT_SCISSOR_0_BR, 0, 0, 0},
        {R_0282D0_PA_SC_VPORT_ZMIN_0, 0, 0, 0},
        {R_0282D4_PA_SC_VPORT_ZMAX_0, 0, 0, 0},
        {R_02843C_PA_CL_VPORT_XSCALE_0, 0, 0, 0},
        {R_028440_PA_CL_VPORT_XOFFSET_0, 0, 0, 0},
        {R_028444_PA_CL_VPORT_YSCALE_0, 0, 0, 0},
        {R_028448_PA_CL_VPORT_YOFFSET_0, 0, 0, 0},
        {R_02844C_PA_CL_VPORT_ZSCALE_0, 0, 0, 0},
        {R_028450_PA_CL_VPORT_ZOFFSET_0, 0, 0, 0},
        {R_0286D4_SPI_INTERP_CONTROL_0, 0, 0, 0},
        {R_028810_PA_CL_CLIP_CNTL, 0, 0, 0},
        {R_028814_PA_SU_SC_MODE_CNTL, 0, 0, 0},
        {R_028818_PA_CL_VTE_CNTL, 0, 0, 0},
        {R_02881C_PA_CL_VS_OUT_CNTL, 0, 0, 0},
        {R_028820_PA_CL_NANINF_CNTL, 0, 0, 0},
        {R_028A00_PA_SU_POINT_SIZE, 0, 0, 0},
        {R_028A04_PA_SU_POINT_MINMAX, 0, 0, 0},
        {R_028A08_PA_SU_LINE_CNTL, 0, 0, 0},
        {R_028A0C_PA_SC_LINE_STIPPLE, 0, 0, 0},
        {R_028A48_PA_SC_MPASS_PS_CNTL, 0, 0, 0},
        {R_028C00_PA_SC_LINE_CNTL, 0, 0, 0},
        {R_028C08_PA_SU_VTX_CNTL, 0, 0, 0},
        {R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 0, 0, 0},
        {R_028C10_PA_CL_GB_VERT_DISC_ADJ, 0, 0, 0},
        {R_028C14_PA_CL_GB_HORZ_CLIP_ADJ, 0, 0, 0},
        {R_028C18_PA_CL_GB_HORZ_DISC_ADJ, 0, 0, 0},
        {R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 0, 0, 0},
        {R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0, 0, 0},
        {R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 0, 0, 0},
        {R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, 0, 0, 0},
        {R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, 0, 0, 0},
        {R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, 0, 0, 0},
        {R_028E20_PA_CL_UCP0_X, 0, 0, 0},
        {R_028E24_PA_CL_UCP0_Y, 0, 0, 0},
        {R_028E28_PA_CL_UCP0_Z, 0, 0, 0},
        {R_028E2C_PA_CL_UCP0_W, 0, 0, 0},
        {R_028E30_PA_CL_UCP1_X, 0, 0, 0},
        {R_028E34_PA_CL_UCP1_Y, 0, 0, 0},
        {R_028E38_PA_CL_UCP1_Z, 0, 0, 0},
        {R_028E3C_PA_CL_UCP1_W, 0, 0, 0},
        {R_028E40_PA_CL_UCP2_X, 0, 0, 0},
        {R_028E44_PA_CL_UCP2_Y, 0, 0, 0},
        {R_028E48_PA_CL_UCP2_Z, 0, 0, 0},
        {R_028E4C_PA_CL_UCP2_W, 0, 0, 0},
        {R_028E50_PA_CL_UCP3_X, 0, 0, 0},
        {R_028E54_PA_CL_UCP3_Y, 0, 0, 0},
        {R_028E58_PA_CL_UCP3_Z, 0, 0, 0},
        {R_028E5C_PA_CL_UCP3_W, 0, 0, 0},
        {R_028E60_PA_CL_UCP4_X, 0, 0, 0},
        {R_028E64_PA_CL_UCP4_Y, 0, 0, 0},
        {R_028E68_PA_CL_UCP4_Z, 0, 0, 0},
        {R_028E6C_PA_CL_UCP4_W, 0, 0, 0},
        {R_028E70_PA_CL_UCP5_X, 0, 0, 0},
        {R_028E74_PA_CL_UCP5_Y, 0, 0, 0},
        {R_028E78_PA_CL_UCP5_Z, 0, 0, 0},
        {R_028E7C_PA_CL_UCP5_W, 0, 0, 0},
        {R_028380_SQ_VTX_SEMANTIC_0, 0, 0, 0},
        {R_028384_SQ_VTX_SEMANTIC_1, 0, 0, 0},
        {R_028388_SQ_VTX_SEMANTIC_2, 0, 0, 0},
        {R_02838C_SQ_VTX_SEMANTIC_3, 0, 0, 0},
        {R_028390_SQ_VTX_SEMANTIC_4, 0, 0, 0},
        {R_028394_SQ_VTX_SEMANTIC_5, 0, 0, 0},
        {R_028398_SQ_VTX_SEMANTIC_6, 0, 0, 0},
        {R_02839C_SQ_VTX_SEMANTIC_7, 0, 0, 0},
        {R_0283A0_SQ_VTX_SEMANTIC_8, 0, 0, 0},
        {R_0283A4_SQ_VTX_SEMANTIC_9, 0, 0, 0},
        {R_0283A8_SQ_VTX_SEMANTIC_10, 0, 0, 0},
        {R_0283AC_SQ_VTX_SEMANTIC_11, 0, 0, 0},
        {R_0283B0_SQ_VTX_SEMANTIC_12, 0, 0, 0},
        {R_0283B4_SQ_VTX_SEMANTIC_13, 0, 0, 0},
        {R_0283B8_SQ_VTX_SEMANTIC_14, 0, 0, 0},
        {R_0283BC_SQ_VTX_SEMANTIC_15, 0, 0, 0},
        {R_0283C0_SQ_VTX_SEMANTIC_16, 0, 0, 0},
        {R_0283C4_SQ_VTX_SEMANTIC_17, 0, 0, 0},
        {R_0283C8_SQ_VTX_SEMANTIC_18, 0, 0, 0},
        {R_0283CC_SQ_VTX_SEMANTIC_19, 0, 0, 0},
        {R_0283D0_SQ_VTX_SEMANTIC_20, 0, 0, 0},
        {R_0283D4_SQ_VTX_SEMANTIC_21, 0, 0, 0},
        {R_0283D8_SQ_VTX_SEMANTIC_22, 0, 0, 0},
        {R_0283DC_SQ_VTX_SEMANTIC_23, 0, 0, 0},
        {R_0283E0_SQ_VTX_SEMANTIC_24, 0, 0, 0},
        {R_0283E4_SQ_VTX_SEMANTIC_25, 0, 0, 0},
        {R_0283E8_SQ_VTX_SEMANTIC_26, 0, 0, 0},
        {R_0283EC_SQ_VTX_SEMANTIC_27, 0, 0, 0},
        {R_0283F0_SQ_VTX_SEMANTIC_28, 0, 0, 0},
        {R_0283F4_SQ_VTX_SEMANTIC_29, 0, 0, 0},
        {R_0283F8_SQ_VTX_SEMANTIC_30, 0, 0, 0},
        {R_0283FC_SQ_VTX_SEMANTIC_31, 0, 0, 0},
        {R_028614_SPI_VS_OUT_ID_0, 0, 0, 0},
        {R_028618_SPI_VS_OUT_ID_1, 0, 0, 0},
        {R_02861C_SPI_VS_OUT_ID_2, 0, 0, 0},
        {R_028620_SPI_VS_OUT_ID_3, 0, 0, 0},
        {R_028624_SPI_VS_OUT_ID_4, 0, 0, 0},
        {R_028628_SPI_VS_OUT_ID_5, 0, 0, 0},
        {R_02862C_SPI_VS_OUT_ID_6, 0, 0, 0},
        {R_028630_SPI_VS_OUT_ID_7, 0, 0, 0},
        {R_028634_SPI_VS_OUT_ID_8, 0, 0, 0},
        {R_028638_SPI_VS_OUT_ID_9, 0, 0, 0},
        {R_0286C4_SPI_VS_OUT_CONFIG, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, S_0085F0_SH_ACTION_ENA(1), 0xFFFFFFFF},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028868_SQ_PGM_RESOURCES_VS, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028894_SQ_PGM_START_FS, REG_FLAG_NEED_BO, S_0085F0_SH_ACTION_ENA(1), 0xFFFFFFFF},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0, 0},
        {R_0288D0_SQ_PGM_CF_OFFSET_VS, 0, 0, 0},
        {R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0, 0},
        {R_028644_SPI_PS_INPUT_CNTL_0, 0, 0, 0},
        {R_028648_SPI_PS_INPUT_CNTL_1, 0, 0, 0},
        {R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0, 0},
        {R_028650_SPI_PS_INPUT_CNTL_3, 0, 0, 0},
        {R_028654_SPI_PS_INPUT_CNTL_4, 0, 0, 0},
        {R_028658_SPI_PS_INPUT_CNTL_5, 0, 0, 0},
        {R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0, 0},
        {R_028660_SPI_PS_INPUT_CNTL_7, 0, 0, 0},
        {R_028664_SPI_PS_INPUT_CNTL_8, 0, 0, 0},
        {R_028668_SPI_PS_INPUT_CNTL_9, 0, 0, 0},
        {R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0, 0},
        {R_028670_SPI_PS_INPUT_CNTL_11, 0, 0, 0},
        {R_028674_SPI_PS_INPUT_CNTL_12, 0, 0, 0},
        {R_028678_SPI_PS_INPUT_CNTL_13, 0, 0, 0},
        {R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0, 0},
        {R_028680_SPI_PS_INPUT_CNTL_15, 0, 0, 0},
        {R_028684_SPI_PS_INPUT_CNTL_16, 0, 0, 0},
        {R_028688_SPI_PS_INPUT_CNTL_17, 0, 0, 0},
        {R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0, 0},
        {R_028690_SPI_PS_INPUT_CNTL_19, 0, 0, 0},
        {R_028694_SPI_PS_INPUT_CNTL_20, 0, 0, 0},
        {R_028698_SPI_PS_INPUT_CNTL_21, 0, 0, 0},
        {R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0, 0},
        {R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0, 0},
        {R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0, 0},
        {R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0, 0},
        {R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0, 0},
        {R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0, 0},
        {R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0, 0},
        {R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0, 0},
        {R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0, 0},
        {R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0, 0},
        {R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0, 0},
        {R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0, 0},
        {R_0286D8_SPI_INPUT_Z, 0, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, S_0085F0_SH_ACTION_ENA(1), 0xFFFFFFFF},
        {GROUP_FORCE_NEW_BLOCK, 0, 0, 0},
        {R_028850_SQ_PGM_RESOURCES_PS, 0, 0, 0},
        {R_028854_SQ_PGM_EXPORTS_PS, 0, 0, 0},
        {R_0288CC_SQ_PGM_CF_OFFSET_PS, 0, 0, 0},
        {R_028400_VGT_MAX_VTX_INDX, 0, 0, 0},
        {R_028404_VGT_MIN_VTX_INDX, 0, 0, 0},
        {R_028408_VGT_INDX_OFFSET, 0, 0, 0},
        {R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0, 0, 0},
        {R_028A84_VGT_PRIMITIVEID_EN, 0, 0, 0},
        {R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 0, 0, 0},
        {R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0, 0, 0},
        {R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0, 0, 0},
};

/* SHADER RESOURCE R600/R700 */
static int r600_state_resource_init(struct r600_context *ctx, u32 offset)
{
        struct r600_reg r600_shader_resource[] = {
                {R_038000_RESOURCE0_WORD0, 0, 0, 0},
                {R_038004_RESOURCE0_WORD1, 0, 0, 0},
                {R_038008_RESOURCE0_WORD2, REG_FLAG_NEED_BO, S_0085F0_TC_ACTION_ENA(1) | S_0085F0_VC_ACTION_ENA(1), 0xFFFFFFFF},
                {R_03800C_RESOURCE0_WORD3, REG_FLAG_NEED_BO, S_0085F0_TC_ACTION_ENA(1) | S_0085F0_VC_ACTION_ENA(1), 0xFFFFFFFF},
                {R_038010_RESOURCE0_WORD4, 0, 0, 0},
                {R_038014_RESOURCE0_WORD5, 0, 0, 0},
                {R_038018_RESOURCE0_WORD6, 0, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_resource);

        for (int i = 0; i < nreg; i++) {
                r600_shader_resource[i].offset += offset;
        }
        return r600_context_add_block(ctx, r600_shader_resource, nreg, PKT3_SET_RESOURCE, R600_RESOURCE_OFFSET);
}

/* SHADER SAMPLER R600/R700 */
static int r600_state_sampler_init(struct r600_context *ctx, u32 offset)
{
        struct r600_reg r600_shader_sampler[] = {
                {R_03C000_SQ_TEX_SAMPLER_WORD0_0, 0, 0, 0},
                {R_03C004_SQ_TEX_SAMPLER_WORD1_0, 0, 0, 0},
                {R_03C008_SQ_TEX_SAMPLER_WORD2_0, 0, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_sampler);

        for (int i = 0; i < nreg; i++) {
                r600_shader_sampler[i].offset += offset;
        }
        return r600_context_add_block(ctx, r600_shader_sampler, nreg, PKT3_SET_SAMPLER, R600_SAMPLER_OFFSET);
}

/* SHADER SAMPLER BORDER R600/R700 */
static int r600_state_sampler_border_init(struct r600_context *ctx, u32 offset)
{
        struct r600_reg r600_shader_sampler_border[] = {
                {R_00A400_TD_PS_SAMPLER0_BORDER_RED, 0, 0, 0},
                {R_00A404_TD_PS_SAMPLER0_BORDER_GREEN, 0, 0, 0},
                {R_00A408_TD_PS_SAMPLER0_BORDER_BLUE, 0, 0, 0},
                {R_00A40C_TD_PS_SAMPLER0_BORDER_ALPHA, 0, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_sampler_border);

        for (int i = 0; i < nreg; i++) {
                r600_shader_sampler_border[i].offset += offset;
        }
        return r600_context_add_block(ctx, r600_shader_sampler_border, nreg, PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
}

static int r600_loop_const_init(struct r600_context *ctx, u32 offset)
{
        unsigned nreg = 32;
        struct r600_reg r600_loop_consts[32];
        int i;

        for (i = 0; i < nreg; i++) {
                r600_loop_consts[i].offset = R600_LOOP_CONST_OFFSET + ((offset + i) * 4);
                r600_loop_consts[i].flags = REG_FLAG_DIRTY_ALWAYS;
                r600_loop_consts[i].flush_flags = 0;
                r600_loop_consts[i].flush_mask = 0;
        }
        return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}

static void r600_context_clear_fenced_bo(struct r600_context *ctx)
{
        struct radeon_bo *bo, *tmp;

        LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
                LIST_DELINIT(&bo->fencedlist);
                bo->fence = 0;
                bo->ctx = NULL;
        }
}

/* cleanup */
void r600_context_fini(struct r600_context *ctx)
{
        struct r600_block *block;
        struct r600_range *range;

        for (int i = 0; i < NUM_RANGES; i++) {
                if (!ctx->range[i].blocks)
                        continue;
                for (int j = 0; j < (1 << HASH_SHIFT); j++) {
                        block = ctx->range[i].blocks[j];
                        if (block) {
                                for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
                                        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
                                        range->blocks[CTX_BLOCK_ID(ctx, offset)] = NULL;
                                }
                                for (int k = 1; k <= block->nbo; k++) {
                                        r600_bo_reference(ctx->radeon, &block->reloc[k].bo, NULL);
                                }
                                free(block);
                        }
                }
                free(ctx->range[i].blocks);
        }
        free(ctx->range);
        free(ctx->blocks);
        free(ctx->reloc);
        free(ctx->bo);
        free(ctx->pm4);

        r600_context_clear_fenced_bo(ctx);
        memset(ctx, 0, sizeof(struct r600_context));
}

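/* Flatten the per-range hash tables into the linear ctx->blocks array,
 * skipping duplicates: a block spanning several registers is indexed
 * once per register but must appear only once in the table.
 */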
int r600_setup_block_table(struct r600_context *ctx)
{
        /* setup block table */
        ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
        if (!ctx->blocks)
                return -ENOMEM;
        for (int i = 0, c = 0; i < NUM_RANGES; i++) {
                if (!ctx->range[i].blocks)
                        continue;
                for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
                        if (!ctx->range[i].blocks[j])
                                continue;

                        add = 1;
                        for (int k = 0; k < c; k++) {
                                if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
                                        add = 0;
                                        break;
                                }
                        }
                        if (add) {
                                assert(c < ctx->nblocks);
                                ctx->blocks[c++] = ctx->range[i].blocks[j];
                                j += (ctx->range[i].blocks[j]->nreg) - 1;
                        }
                }
        }
        return 0;
}

int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
{
        int r;

        memset(ctx, 0, sizeof(struct r600_context));
        ctx->radeon = radeon;
        LIST_INITHEAD(&ctx->query_list);

        ctx->range = calloc(NUM_RANGES, sizeof(struct r600_range));
        if (!ctx->range) {
                r = -ENOMEM;
                goto out_err;
        }

        /* add blocks */
        r = r600_context_add_block(ctx, r600_config_reg_list,
                                   Elements(r600_config_reg_list), PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
        if (r)
                goto out_err;
        r = r600_context_add_block(ctx, r600_context_reg_list,
                                   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
        if (r)
                goto out_err;
        r = r600_context_add_block(ctx, r600_ctl_const_list,
                                   Elements(r600_ctl_const_list), PKT3_SET_CTL_CONST, R600_CTL_CONST_OFFSET);
        if (r)
                goto out_err;

        /* PS SAMPLER BORDER */
        for (int j = 0, offset = 0; j < 18; j++, offset += 0x10) {
                r = r600_state_sampler_border_init(ctx, offset);
                if (r)
                        goto out_err;
        }

        /* VS SAMPLER BORDER */
        for (int j = 0, offset = 0x200; j < 18; j++, offset += 0x10) {
                r = r600_state_sampler_border_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* PS SAMPLER */
        for (int j = 0, offset = 0; j < 18; j++, offset += 0xC) {
                r = r600_state_sampler_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* VS SAMPLER */
        for (int j = 0, offset = 0xD8; j < 18; j++, offset += 0xC) {
                r = r600_state_sampler_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* PS RESOURCE */
        for (int j = 0, offset = 0; j < 160; j++, offset += 0x1C) {
                r = r600_state_resource_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* VS RESOURCE */
        for (int j = 0, offset = 0x1180; j < 160; j++, offset += 0x1C) {
                r = r600_state_resource_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* FS RESOURCE */
        for (int j = 0, offset = 0x2300; j < 16; j++, offset += 0x1C) {
                r = r600_state_resource_init(ctx, offset);
                if (r)
                        goto out_err;
        }

        /* PS loop const */
        r = r600_loop_const_init(ctx, 0);
        if (r)
                goto out_err;
        /* VS loop const */
        r = r600_loop_const_init(ctx, 32);
        if (r)
                goto out_err;

        r = r600_setup_block_table(ctx);
        if (r)
                goto out_err;

        /* allocate cs variables */
        ctx->nreloc = RADEON_CTX_MAX_PM4;
        ctx->reloc = calloc(ctx->nreloc, sizeof(struct r600_reloc));
        if (ctx->reloc == NULL) {
                r = -ENOMEM;
                goto out_err;
        }
        ctx->bo = calloc(ctx->nreloc, sizeof(void *));
        if (ctx->bo == NULL) {
                r = -ENOMEM;
                goto out_err;
        }
        ctx->pm4_ndwords = RADEON_CTX_MAX_PM4;
        ctx->pm4 = calloc(ctx->pm4_ndwords, 4);
        if (ctx->pm4 == NULL) {
                r = -ENOMEM;
                goto out_err;
        }
        /* reserve 16 dwords for the fence mechanism */
        ctx->pm4_ndwords -= 16;

        LIST_INITHEAD(&ctx->fenced_bo);

        /* init dirty list */
        LIST_INITHEAD(&ctx->dirty);

        ctx->max_db = 4;

        return 0;
out_err:
        r600_context_fini(ctx);
        return r;
}

/* Flushes all surfaces */
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags)
{
        unsigned ndwords = 5;

        if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
                /* need to flush */
                r600_context_flush(ctx);
        }

        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3, ctx->predicate_drawing);
        ctx->pm4[ctx->pm4_cdwords++] = flush_flags;     /* CP_COHER_CNTL */
        ctx->pm4[ctx->pm4_cdwords++] = 0xffffffff;      /* CP_COHER_SIZE */
        ctx->pm4[ctx->pm4_cdwords++] = 0;               /* CP_COHER_BASE */
        ctx->pm4[ctx->pm4_cdwords++] = 0x0000000A;      /* POLL_INTERVAL */
}

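/* Emit whatever synchronization a BO needs before the GPU uses it:
 * normally a SURFACE_SYNC covering the BO plus a relocation NOP, but on
 * pre-RV770 chips flushing CB/DB destinations a CACHE_FLUSH_AND_INV
 * event is used instead, with an extra CB1 surface sync as a workaround
 * on RV670/RS780/RS880. Flags already covered by bo->last_flush are
 * skipped.
 */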
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
                           unsigned flush_mask, struct r600_bo *rbo)
{
        struct radeon_bo *bo;

        bo = rbo->bo;
        /* if bo has already been flushed */
        if (!(~bo->last_flush & flush_flags)) {
                bo->last_flush &= flush_mask;
                return;
        }

        if ((ctx->radeon->family < CHIP_RV770) &&
            (G_0085F0_CB_ACTION_ENA(flush_flags) ||
             G_0085F0_DB_ACTION_ENA(flush_flags))) {
                if (ctx->flags & R600_CONTEXT_CHECK_EVENT_FLUSH) {
                        /* the rv670 seems to fail fbo-generatemipmap unless we flush the CB1 dest base ena */
                        if ((bo->binding & BO_BOUND_TEXTURE) &&
                            (flush_flags & S_0085F0_CB_ACTION_ENA(1))) {
                                if ((ctx->radeon->family == CHIP_RV670) ||
                                    (ctx->radeon->family == CHIP_RS780) ||
                                    (ctx->radeon->family == CHIP_RS880)) {
                                        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3, ctx->predicate_drawing);
                                        ctx->pm4[ctx->pm4_cdwords++] = S_0085F0_CB1_DEST_BASE_ENA(1);   /* CP_COHER_CNTL */
                                        ctx->pm4[ctx->pm4_cdwords++] = 0xffffffff;      /* CP_COHER_SIZE */
                                        ctx->pm4[ctx->pm4_cdwords++] = 0;               /* CP_COHER_BASE */
                                        ctx->pm4[ctx->pm4_cdwords++] = 0x0000000A;      /* POLL_INTERVAL */
                                }
                        }

                        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, ctx->predicate_drawing);
                        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
                        ctx->flags &= ~R600_CONTEXT_CHECK_EVENT_FLUSH;
                }
        } else {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3, ctx->predicate_drawing);
                ctx->pm4[ctx->pm4_cdwords++] = flush_flags;
                ctx->pm4[ctx->pm4_cdwords++] = (bo->size + 255) >> 8;
                ctx->pm4[ctx->pm4_cdwords++] = 0x00000000;
                ctx->pm4[ctx->pm4_cdwords++] = 0x0000000A;
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
                ctx->pm4[ctx->pm4_cdwords++] = bo->reloc_id;
        }
        bo->last_flush = (bo->last_flush | flush_flags) & flush_mask;
}

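/* Point a PM4 dword at the relocation entry for a BO, creating the
 * relocation (and referencing the BO) the first time the BO is seen in
 * the current CS.
 */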
void r600_context_bo_reloc(struct r600_context *ctx, u32 *pm4, struct r600_bo *rbo)
{
        struct radeon_bo *bo;

        bo = rbo->bo;
        assert(bo != NULL);
        if (bo->reloc) {
                *pm4 = bo->reloc_id;
                return;
        }
        bo->reloc = &ctx->reloc[ctx->creloc];
        bo->reloc_id = ctx->creloc * sizeof(struct r600_reloc) / 4;
        ctx->reloc[ctx->creloc].handle = bo->handle;
        ctx->reloc[ctx->creloc].read_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
        ctx->reloc[ctx->creloc].write_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
        ctx->reloc[ctx->creloc].flags = 0;
        radeon_bo_reference(ctx->radeon, &ctx->bo[ctx->creloc], bo);
        rbo->fence = ctx->radeon->fence;
        ctx->creloc++;
        /* set PKT3 to point to proper reloc */
        *pm4 = bo->reloc_id;
}

void r600_context_reg(struct r600_context *ctx,
                      unsigned offset, unsigned value,
                      unsigned mask)
{
        struct r600_range *range;
        struct r600_block *block;
        unsigned id;
        unsigned new_val;
        int dirty;

        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
        block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
        id = (offset - block->start_offset) >> 2;

        dirty = block->status & R600_BLOCK_STATUS_DIRTY;

        new_val = block->reg[id];
        new_val &= ~mask;
        new_val |= value;
        if (new_val != block->reg[id]) {
                dirty |= R600_BLOCK_STATUS_DIRTY;
                block->reg[id] = new_val;
        }
        r600_context_dirty_block(ctx, block, dirty, id);
}

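/* Mark a block dirty and, on the transition to dirty/enabled, account
 * its dwords and put it on the context's dirty list. nreg_dirty tracks
 * the highest register index that actually changed.
 */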
void r600_context_dirty_block(struct r600_context *ctx, struct r600_block *block,
                              int dirty, int index)
{
        if (dirty && (index + 1) > block->nreg_dirty)
                block->nreg_dirty = index + 1;

        if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
                block->status |= R600_BLOCK_STATUS_ENABLED;
                block->status |= R600_BLOCK_STATUS_DIRTY;
                ctx->pm4_dirty_cdwords += block->pm4_ndwords + block->pm4_flush_ndwords;
                LIST_ADDTAIL(&block->list, &ctx->dirty);
        }
}

void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
        struct r600_range *range;
        struct r600_block *block;
        unsigned new_val;
        int dirty;

        for (int i = 0; i < state->nregs; i++) {
                unsigned id, reloc_id;
                struct r600_pipe_reg *reg = &state->regs[i];

                range = &ctx->range[CTX_RANGE_ID(ctx, reg->offset)];
                block = range->blocks[CTX_BLOCK_ID(ctx, reg->offset)];
                id = (reg->offset - block->start_offset) >> 2;

                dirty = block->status & R600_BLOCK_STATUS_DIRTY;

                new_val = block->reg[id];
                new_val &= ~reg->mask;
                new_val |= reg->value;
                if (new_val != block->reg[id]) {
                        block->reg[id] = new_val;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
                if (block->flags & REG_FLAG_DIRTY_ALWAYS)
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                if (block->pm4_bo_index[id] && state->regs[i].bo) {
                        /* find relocation */
                        reloc_id = block->pm4_bo_index[id];
                        r600_bo_reference(ctx->radeon, &block->reloc[reloc_id].bo, reg->bo);
                        reg->bo->fence = ctx->radeon->fence;
                        /* always force dirty for relocs for now */
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }

                r600_context_dirty_block(ctx, block, dirty, id);
        }
}

void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        int i;
        int dirty;
        int num_regs = ctx->radeon->chip_class >= EVERGREEN ? 8 : 7;

        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
        block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
                if (block->reloc[1].bo)
                        block->reloc[1].bo->bo->binding &= ~BO_BOUND_TEXTURE;

                r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
                r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
                LIST_DELINIT(&block->list);
                return;
        }

        dirty = block->status & R600_BLOCK_STATUS_DIRTY;

        for (i = 0; i < num_regs; i++) {
                if (block->reg[i] != state->regs[i].value) {
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                        block->reg[i] = state->regs[i].value;
                }
        }

        /* if no BOs on block, force dirty */
        if (!block->reloc[1].bo || !block->reloc[2].bo)
                dirty |= R600_BLOCK_STATUS_DIRTY;

        if (!dirty) {
                if (state->regs[0].bo) {
                        if ((block->reloc[1].bo->bo->handle != state->regs[0].bo->bo->handle) ||
                            (block->reloc[2].bo->bo->handle != state->regs[0].bo->bo->handle))
                                dirty |= R600_BLOCK_STATUS_DIRTY;
                } else {
                        if ((block->reloc[1].bo->bo->handle != state->regs[2].bo->bo->handle) ||
                            (block->reloc[2].bo->bo->handle != state->regs[3].bo->bo->handle))
                                dirty |= R600_BLOCK_STATUS_DIRTY;
                }
        }
        if (!dirty) {
                if (state->regs[0].bo)
                        state->regs[0].bo->fence = ctx->radeon->fence;
                else {
                        state->regs[2].bo->fence = ctx->radeon->fence;
                        state->regs[3].bo->fence = ctx->radeon->fence;
                }
        } else {
                r600_bo_reference(ctx->radeon, &block->reloc[1].bo, NULL);
                r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
                if (state->regs[0].bo) {
                        /* VERTEX RESOURCE: pretend there are two BOs to
                         * relocate, so the VERTEX and TEXTURE resource
                         * cases can share a single path
                         */
                        r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
                        r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
                        state->regs[0].bo->fence = ctx->radeon->fence;
                } else {
                        /* TEXTURE RESOURCE */
                        r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
                        r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
                        state->regs[2].bo->fence = ctx->radeon->fence;
                        state->regs[3].bo->fence = ctx->radeon->fence;
                        state->regs[2].bo->bo->binding |= BO_BOUND_TEXTURE;
                }
        }
        r600_context_dirty_block(ctx, block, dirty, num_regs - 1);
}

void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
{
        unsigned offset = R_038000_SQ_TEX_RESOURCE_WORD0_0 + 0x1C * rid;

        r600_context_pipe_state_set_resource(ctx, state, offset);
}

void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
{
        unsigned offset = R_038000_SQ_TEX_RESOURCE_WORD0_0 + 0x1180 + 0x1C * rid;

        r600_context_pipe_state_set_resource(ctx, state, offset);
}

void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
{
        unsigned offset = R_038000_SQ_TEX_RESOURCE_WORD0_0 + 0x2300 + 0x1C * rid;

        r600_context_pipe_state_set_resource(ctx, state, offset);
}

static inline void r600_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        int i;
        int dirty;

        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
        block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
                LIST_DELINIT(&block->list);
                return;
        }
        dirty = block->status & R600_BLOCK_STATUS_DIRTY;
        for (i = 0; i < 3; i++) {
                if (block->reg[i] != state->regs[i].value) {
                        block->reg[i] = state->regs[i].value;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
        }

        r600_context_dirty_block(ctx, block, dirty, 2);
}

static inline void r600_context_ps_partial_flush(struct r600_context *ctx)
{
        if (!(ctx->flags & R600_CONTEXT_DRAW_PENDING))
                return;

        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

        ctx->flags &= ~R600_CONTEXT_DRAW_PENDING;
}

static inline void r600_context_pipe_state_set_sampler_border(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        int i;
        int dirty;

        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
        block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
                LIST_DELINIT(&block->list);
                return;
        }
        if (state->nregs <= 3) {
                return;
        }
        dirty = block->status & R600_BLOCK_STATUS_DIRTY;
        for (i = 0; i < 4; i++) {
                if (block->reg[i] != state->regs[i + 3].value) {
                        block->reg[i] = state->regs[i + 3].value;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
        }

        /* We have to flush the shaders before we change the border color
         * registers, or previous draw commands that haven't completed yet
         * will end up using the new border color. */
        if (dirty & R600_BLOCK_STATUS_DIRTY)
                r600_context_ps_partial_flush(ctx);

        r600_context_dirty_block(ctx, block, dirty, 3);
}

void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
        unsigned offset;

        offset = 0x0003C000 + id * 0xc;
        r600_context_pipe_state_set_sampler(ctx, state, offset);
        offset = 0x0000A400 + id * 0x10;
        r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
        unsigned offset;

        offset = 0x0003C0D8 + id * 0xc;
        r600_context_pipe_state_set_sampler(ctx, state, offset);
        offset = 0x0000A600 + id * 0x10;
        r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        unsigned id;

        range = &ctx->range[CTX_RANGE_ID(ctx, offset)];
        block = range->blocks[CTX_BLOCK_ID(ctx, offset)];
        offset -= block->start_offset;
        id = block->pm4_bo_index[offset >> 2];
        if (block->reloc[id].bo) {
                return block->reloc[id].bo;
        }
        return NULL;
}

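/* Copy a dirty block's pre-built packet into the CS, patching in
 * relocations and emitting cache flushes first. If only the first
 * nreg_dirty registers changed (and the block has no BOs), shrink the
 * packet's count field so the unchanged tail is not emitted.
 */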
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
        int id;

        if (block->nreg_dirty == 0 && block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS)) {
                goto out;
        }

        ctx->flags |= R600_CONTEXT_CHECK_EVENT_FLUSH;
        for (int j = 0; j < block->nreg; j++) {
                if (block->pm4_bo_index[j]) {
                        /* find relocation */
                        id = block->pm4_bo_index[j];
                        if (block->reloc[id].bo) {
                                r600_context_bo_reloc(ctx,
                                                      &block->pm4[block->reloc[id].bo_pm4_index],
                                                      block->reloc[id].bo);
                                r600_context_bo_flush(ctx,
                                                      block->reloc[id].flush_flags,
                                                      block->reloc[id].flush_mask,
                                                      block->reloc[id].bo);
                        }
                }
        }
        ctx->flags &= ~R600_CONTEXT_CHECK_EVENT_FLUSH;
        memcpy(&ctx->pm4[ctx->pm4_cdwords], block->pm4, block->pm4_ndwords * 4);
        ctx->pm4_cdwords += block->pm4_ndwords;

        if (block->nreg_dirty != block->nreg && block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS)) {
                int new_dwords = block->nreg_dirty;
                uint32_t oldword, newword;

                ctx->pm4_cdwords -= block->pm4_ndwords;
                newword = oldword = ctx->pm4[ctx->pm4_cdwords];
                newword &= PKT_COUNT_C;
                newword |= PKT_COUNT_S(new_dwords);
                ctx->pm4[ctx->pm4_cdwords] = newword;
                ctx->pm4_cdwords += new_dwords + 2;
        }
out:
        block->status ^= R600_BLOCK_STATUS_DIRTY;
        block->nreg_dirty = 0;
        LIST_DELINIT(&block->list);
}

void r600_context_flush_dest_caches(struct r600_context *ctx)
{
        struct r600_bo *cb[8];
        struct r600_bo *db;
        int i;

        if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
                return;

        db = r600_context_reg_bo(ctx, R_02800C_DB_DEPTH_BASE);
        cb[0] = r600_context_reg_bo(ctx, R_028040_CB_COLOR0_BASE);
        cb[1] = r600_context_reg_bo(ctx, R_028044_CB_COLOR1_BASE);
        cb[2] = r600_context_reg_bo(ctx, R_028048_CB_COLOR2_BASE);
        cb[3] = r600_context_reg_bo(ctx, R_02804C_CB_COLOR3_BASE);
        cb[4] = r600_context_reg_bo(ctx, R_028050_CB_COLOR4_BASE);
        cb[5] = r600_context_reg_bo(ctx, R_028054_CB_COLOR5_BASE);
        cb[6] = r600_context_reg_bo(ctx, R_028058_CB_COLOR6_BASE);
        cb[7] = r600_context_reg_bo(ctx, R_02805C_CB_COLOR7_BASE);

        ctx->flags |= R600_CONTEXT_CHECK_EVENT_FLUSH;
        /* flush the color buffers */
        for (i = 0; i < 8; i++) {
                if (!cb[i])
                        continue;

                r600_context_bo_flush(ctx,
                                      (S_0085F0_CB0_DEST_BASE_ENA(1) << i) |
                                      S_0085F0_CB_ACTION_ENA(1),
                                      0, cb[i]);
        }
        if (db) {
                r600_context_bo_flush(ctx, S_0085F0_DB_ACTION_ENA(1), 0, db);
        }
        ctx->flags &= ~R600_CONTEXT_CHECK_EVENT_FLUSH;
        ctx->flags &= ~R600_CONTEXT_DST_CACHES_DIRTY;
}

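/* Emit a draw: make sure there is room for the dirty state plus the
 * draw packets (flushing the CS if not), write out every dirty block,
 * then emit INDEX_TYPE, NUM_INSTANCES and DRAW_INDEX or DRAW_INDEX_AUTO.
 */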
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
{
        unsigned ndwords = 7;
        struct r600_block *dirty_block = NULL;
        struct r600_block *next_block;

        if (draw->indices) {
                ndwords = 11;
                /* make sure there is enough relocation space before scheduling draw */
                if (ctx->creloc >= (ctx->nreloc - 1)) {
                        r600_context_flush(ctx);
                }
        }

        /* queries need some special values */
        if (ctx->num_query_running) {
                if (ctx->radeon->family >= CHIP_RV770) {
                        r600_context_reg(ctx,
                                         R_028D0C_DB_RENDER_CONTROL,
                                         S_028D0C_R700_PERFECT_ZPASS_COUNTS(1),
                                         S_028D0C_R700_PERFECT_ZPASS_COUNTS(1));
                }
                r600_context_reg(ctx,
                                 R_028D10_DB_RENDER_OVERRIDE,
                                 S_028D10_NOOP_CULL_DISABLE(1),
                                 S_028D10_NOOP_CULL_DISABLE(1));
        }

        /* update the max dword count to make sure we have enough space
         * reserved for flushing the destination caches */
        ctx->pm4_ndwords = RADEON_CTX_MAX_PM4 - ctx->num_dest_buffers * 7 - 16;

        if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
                /* need to flush */
                r600_context_flush(ctx);
        }
        /* at this point everything is flushed and ctx->pm4_cdwords = 0 */
        if ((ctx->pm4_dirty_cdwords + ndwords) > ctx->pm4_ndwords) {
                R600_ERR("context is too big to be scheduled\n");
                return;
        }
        /* enough room to copy packet */
        LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &ctx->dirty, list) {
                r600_context_block_emit_dirty(ctx, dirty_block);
        }

        /* draw packet */
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_INDEX_TYPE, 0, ctx->predicate_drawing);
        ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_index_type;
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NUM_INSTANCES, 0, ctx->predicate_drawing);
        ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_instances;
        if (draw->indices) {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX, 3, ctx->predicate_drawing);
                ctx->pm4[ctx->pm4_cdwords++] = draw->indices_bo_offset + r600_bo_offset(draw->indices);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_indices;
                ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_draw_initiator;
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], draw->indices);
        } else {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, ctx->predicate_drawing);
                ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_num_indices;
                ctx->pm4[ctx->pm4_cdwords++] = draw->vgt_draw_initiator;
        }

        ctx->flags |= (R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING);

        /* all dirty state has been scheduled in the current cs */
        ctx->pm4_dirty_cdwords = 0;
}

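/* Submit the accumulated PM4 stream: suspend queries, flush the
 * destination caches, append a PS partial flush and an EOP fence write,
 * hand the IB and relocation chunks to the kernel via DRM_RADEON_CS,
 * then reset the per-CS state.
 */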
void r600_context_flush(struct r600_context *ctx)
{
        struct drm_radeon_cs drmib = {};
        struct drm_radeon_cs_chunk chunks[2];
        uint64_t chunk_array[2];
        unsigned fence;
        int r;

        if (!ctx->pm4_cdwords)
                return;

        /* suspend queries */
        r600_context_queries_suspend(ctx);

        if (ctx->radeon->family >= CHIP_CEDAR)
                evergreen_context_flush_dest_caches(ctx);
        else
                r600_context_flush_dest_caches(ctx);

        /* partial flush is needed to avoid lockups on some chips with user fences */
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        /* emit fence */
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        ctx->pm4[ctx->pm4_cdwords++] = (1 << 29) | (0 << 24);
        ctx->pm4[ctx->pm4_cdwords++] = ctx->radeon->fence;
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], ctx->radeon->fence_bo);

#if 1
        /* emit cs */
        drmib.num_chunks = 2;
        drmib.chunks = (uint64_t)(uintptr_t)chunk_array;
        chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
        chunks[0].length_dw = ctx->pm4_cdwords;
        chunks[0].chunk_data = (uint64_t)(uintptr_t)ctx->pm4;
        chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
        chunks[1].length_dw = ctx->creloc * sizeof(struct r600_reloc) / 4;
        chunks[1].chunk_data = (uint64_t)(uintptr_t)ctx->reloc;
        chunk_array[0] = (uint64_t)(uintptr_t)&chunks[0];
        chunk_array[1] = (uint64_t)(uintptr_t)&chunks[1];
        r = drmCommandWriteRead(ctx->radeon->fd, DRM_RADEON_CS, &drmib,
                                sizeof(struct drm_radeon_cs));
#else
        *ctx->radeon->cfence = ctx->radeon->fence;
#endif

        r600_context_update_fenced_list(ctx);

        fence = ctx->radeon->fence + 1;
        if (fence < ctx->radeon->fence) {
                /* wrap around */
                fence = 1;
                r600_context_fence_wraparound(ctx, fence);
        }
        ctx->radeon->fence = fence;

        /* restart */
        for (int i = 0; i < ctx->creloc; i++) {
                ctx->bo[i]->reloc = NULL;
                ctx->bo[i]->last_flush = 0;
                radeon_bo_reference(ctx->radeon, &ctx->bo[i], NULL);
        }
        ctx->creloc = 0;
        ctx->pm4_dirty_cdwords = 0;
        ctx->pm4_cdwords = 0;
        ctx->flags = 0;

        /* resume queries */
        r600_context_queries_resume(ctx);

        /* set all valid groups as dirty so they get re-emitted on the
         * next draw command
         */
        for (int i = 0; i < ctx->nblocks; i++) {
                if (ctx->blocks[i]->status & R600_BLOCK_STATUS_ENABLED) {
                        if (!(ctx->blocks[i]->status & R600_BLOCK_STATUS_DIRTY)) {
                                LIST_ADDTAIL(&ctx->blocks[i]->list, &ctx->dirty);
                        }
                        ctx->pm4_dirty_cdwords += ctx->blocks[i]->pm4_ndwords + ctx->blocks[i]->pm4_flush_ndwords;
                        ctx->blocks[i]->status |= R600_BLOCK_STATUS_DIRTY;
                        ctx->blocks[i]->nreg_dirty = ctx->blocks[i]->nreg;
                }
        }
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence_bo, unsigned offset, unsigned value)
{
        unsigned ndwords = 10;

        if (((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) ||
            (ctx->creloc >= (ctx->nreloc - 1))) {
                /* need to flush */
                r600_context_flush(ctx);
        }

        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
        ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
        ctx->pm4[ctx->pm4_cdwords++] = offset << 2;             /* ADDRESS_LO */
        ctx->pm4[ctx->pm4_cdwords++] = (1 << 29) | (0 << 24);   /* DATA_SEL | INT_EN | ADDRESS_HI */
        ctx->pm4[ctx->pm4_cdwords++] = value;                   /* DATA_LO */
        ctx->pm4[ctx->pm4_cdwords++] = 0;                       /* DATA_HI */
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], fence_bo);
}

void r600_context_dump_bof(struct r600_context *ctx, const char *file)
{
        bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
        unsigned i;

        root = device_id = bcs = blob = array = bo = size = handle = NULL;
        root = bof_object();
        if (root == NULL)
                goto out_err;
        device_id = bof_int32(ctx->radeon->device);
        if (device_id == NULL)
                goto out_err;
        if (bof_object_set(root, "device_id", device_id))
                goto out_err;
        bof_decref(device_id);
        device_id = NULL;
        /* dump relocs */
        blob = bof_blob(ctx->creloc * 16, ctx->reloc);
        if (blob == NULL)
                goto out_err;
        if (bof_object_set(root, "reloc", blob))
                goto out_err;
        bof_decref(blob);
        blob = NULL;
        /* dump cs */
        blob = bof_blob(ctx->pm4_cdwords * 4, ctx->pm4);
        if (blob == NULL)
                goto out_err;
        if (bof_object_set(root, "pm4", blob))
                goto out_err;
        bof_decref(blob);
        blob = NULL;
        /* dump bo */
        array = bof_array();
        if (array == NULL)
                goto out_err;
        for (i = 0; i < ctx->creloc; i++) {
                struct radeon_bo *rbo = ctx->bo[i];

                bo = bof_object();
                if (bo == NULL)
                        goto out_err;
                size = bof_int32(rbo->size);
                if (size == NULL)
                        goto out_err;
                if (bof_object_set(bo, "size", size))
                        goto out_err;
                bof_decref(size);
                size = NULL;
                handle = bof_int32(rbo->handle);
                if (handle == NULL)
                        goto out_err;
                if (bof_object_set(bo, "handle", handle))
                        goto out_err;
                bof_decref(handle);
                handle = NULL;
                radeon_bo_map(ctx->radeon, rbo);
                blob = bof_blob(rbo->size, rbo->data);
                radeon_bo_unmap(ctx->radeon, rbo);
                if (blob == NULL)
                        goto out_err;
                if (bof_object_set(bo, "data", blob))
                        goto out_err;
                bof_decref(blob);
                blob = NULL;
                if (bof_array_append(array, bo))
                        goto out_err;
                bof_decref(bo);
                bo = NULL;
        }
        if (bof_object_set(root, "bo", array))
                goto out_err;
        bof_dump_file(root, file);
out_err:
        bof_decref(blob);
        bof_decref(array);
        bof_decref(bo);
        bof_decref(size);
        bof_decref(handle);
        bof_decref(device_id);
        bof_decref(root);
}

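/* Accumulate the results written to the query buffer. ZPASS counts are
 * stored as (start, end) 64-bit pairs, one per DB; a pair is only summed
 * once both halves have their valid (top) bit set.
 */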
static boolean r600_query_result(struct r600_context *ctx, struct r600_query *query, boolean wait)
{
        u64 start, end;
        u32 *results;
        int i;
        int size;

        if (wait)
                results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_CPU_READ, NULL);
        else
                results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_DONTBLOCK | PB_USAGE_CPU_READ, NULL);
        if (!results)
                return FALSE;

        size = query->num_results * (query->type == PIPE_QUERY_OCCLUSION_COUNTER ? ctx->max_db : 1);
        for (i = 0; i < size; i += 4) {
                start = (u64)results[i] | (u64)results[i + 1] << 32;
                end = (u64)results[i + 2] | (u64)results[i + 3] << 32;
                if (((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))
                    || query->type == PIPE_QUERY_TIME_ELAPSED) {
                        query->result += end - start;
                }
        }
        r600_bo_unmap(ctx->radeon, query->buffer);
        query->num_results = 0;

        return TRUE;
}

void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
{
        unsigned required_space;
        int num_backends = r600_get_num_backends(ctx->radeon);

        /* query request needs 6/8 dwords for begin + 6/8 dwords for end */
        if (query->type == PIPE_QUERY_TIME_ELAPSED)
                required_space = 16;
        else
                required_space = 12;

        if ((required_space + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
                /* need to flush */
                r600_context_flush(ctx);
        }

        /* if query buffer is full force a flush */
        if (query->num_results*4 >= query->buffer_size - 16) {
                r600_context_flush(ctx);
                r600_query_result(ctx, query, TRUE);
        }

        if (query->type == PIPE_QUERY_OCCLUSION_COUNTER &&
            num_backends > 0 && num_backends < ctx->max_db) {
                /* as per info on ZPASS, the driver must set the unused DB top bits */
                u32 *results;
                int i;

                results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_DONTBLOCK | PB_USAGE_CPU_WRITE, NULL);
                if (results) {
                        memset(results + (query->num_results * 4), 0, ctx->max_db * 4 * 4);

                        for (i = num_backends; i < ctx->max_db; i++) {
                                results[(i * 4)+1] = 0x80000000;
                                results[(i * 4)+3] = 0x80000000;
                        }
                        r600_bo_unmap(ctx->radeon, query->buffer);
                }
        }

        /* emit begin query */
        if (query->type == PIPE_QUERY_TIME_ELAPSED) {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
                ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
                ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + r600_bo_offset(query->buffer);
                ctx->pm4[ctx->pm4_cdwords++] = (3 << 29);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                ctx->pm4[ctx->pm4_cdwords++] = 0;
        } else {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + r600_bo_offset(query->buffer);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
        }
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);

        query->state |= R600_QUERY_STATE_STARTED;
        query->state ^= R600_QUERY_STATE_ENDED;
        ctx->num_query_running++;
}

void r600_query_end(struct r600_context *ctx, struct r600_query *query)
{
        /* emit end query */
        if (query->type == PIPE_QUERY_TIME_ELAPSED) {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
                ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
                ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + 8 + r600_bo_offset(query->buffer);
                ctx->pm4[ctx->pm4_cdwords++] = (3 << 29);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                ctx->pm4[ctx->pm4_cdwords++] = 0;
        } else {
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                ctx->pm4[ctx->pm4_cdwords++] = query->num_results*4 + 8 + r600_bo_offset(query->buffer);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
        }
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
        ctx->pm4[ctx->pm4_cdwords++] = 0;
        r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);

        query->num_results += 4 * (query->type == PIPE_QUERY_OCCLUSION_COUNTER ? ctx->max_db : 1);
        query->state ^= R600_QUERY_STATE_STARTED;
        query->state |= R600_QUERY_STATE_ENDED;
        ctx->num_query_running--;
}

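/* Emit a SET_PREDICATION packet pointing at the most recently written
 * set of (start, end) pairs in the query buffer, so that following draws
 * can be discarded by the CP based on the ZPASS result.
 */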
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
                            int flag_wait)
{
        ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SET_PREDICATION, 1, 0);

        if (operation == PREDICATION_OP_CLEAR) {
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                ctx->pm4[ctx->pm4_cdwords++] = PRED_OP(PREDICATION_OP_CLEAR);
        } else {
                int results_base = query->num_results - (4 * ctx->max_db);

                if (results_base < 0)
                        results_base = 0;

                ctx->pm4[ctx->pm4_cdwords++] = results_base*4 + r600_bo_offset(query->buffer);
                ctx->pm4[ctx->pm4_cdwords++] = PRED_OP(operation) | (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW) | PREDICATION_DRAW_VISIBLE;
                ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
                ctx->pm4[ctx->pm4_cdwords++] = 0;
                r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], query->buffer);
        }
}

struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type)
{
        struct r600_query *query;

        if (query_type != PIPE_QUERY_OCCLUSION_COUNTER && query_type != PIPE_QUERY_TIME_ELAPSED)
                return NULL;

        query = calloc(1, sizeof(struct r600_query));
        if (query == NULL)
                return NULL;

        query->type = query_type;
        query->buffer_size = 4096;

        /* As of GL4, query buffers are normally read by the CPU after
         * being written by the GPU, hence staging is probably a good
         * usage pattern.
         */
        query->buffer = r600_bo(ctx->radeon, query->buffer_size, 1, 0,
                                PIPE_USAGE_STAGING);
        if (!query->buffer) {
                free(query);
                return NULL;
        }

        LIST_ADDTAIL(&query->list, &ctx->query_list);

        return query;
}

void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
        r600_bo_reference(ctx->radeon, &query->buffer, NULL);
        LIST_DELINIT(&query->list);
        free(query);
}

boolean r600_context_query_result(struct r600_context *ctx,
                                  struct r600_query *query,
                                  boolean wait, void *vresult)
{
        uint64_t *result = (uint64_t*)vresult;

        if (query->num_results) {
                r600_context_flush(ctx);
        }
        if (!r600_query_result(ctx, query, wait))
                return FALSE;
        if (query->type == PIPE_QUERY_TIME_ELAPSED)
                *result = (1000000*query->result)/r600_get_clock_crystal_freq(ctx->radeon);
        else
                *result = query->result;
        query->result = 0;
        return TRUE;
}

void r600_context_queries_suspend(struct r600_context *ctx)
{
        struct r600_query *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->query_list, list) {
                if (query->state & R600_QUERY_STATE_STARTED) {
                        r600_query_end(ctx, query);
                        query->state |= R600_QUERY_STATE_SUSPENDED;
                }
        }
}

void r600_context_queries_resume(struct r600_context *ctx)
{
        struct r600_query *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->query_list, list) {
                if (query->state & R600_QUERY_STATE_SUSPENDED) {
                        r600_query_begin(ctx, query);
                        query->state ^= R600_QUERY_STATE_SUSPENDED;
                }
        }
}