/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_hw_context_priv.h"
#include "util/u_memory.h"

/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
	return;
}

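/*
 * Note on the register-block scheme used below (explanatory comment, not
 * part of the original code): registers are grouped into r600_block
 * objects, each of which caches a ready-to-copy PM4 packet (header dword,
 * start-offset dword, then one dword per register). Blocks sit on
 * ctx->dirty until their packet is copied into the command stream, so
 * repeated writes to registers in the same block collapse into a single
 * re-emit of that block.
 */
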
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list, &ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
		if ((ctx->family > CHIP_R600) &&
		    (ctx->family < CHIP_RV770) && reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
			block->pm4[block->pm4_ndwords++] = reg[i+j].sbu_flags;
		}
	}
	/* check that we stay in limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}

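/*
 * Explanatory note (added): for every REG_FLAG_NEED_BO register,
 * r600_init_block() above appends a PKT3_NOP + 0x00000000 pair to the
 * block's PM4 stream and remembers the zero dword's position in
 * reloc[].bo_pm4_index. At emit time that dword is patched with the value
 * returned by r600_context_bo_reloc(), which is how the kernel learns
 * which buffer object backs the register value.
 */
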
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* ignore new block marker */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* ignore regs not on R600 on R600 */
		if ((reg[i].flags & REG_FLAG_NOT_R600) && ctx->family == CHIP_R600) {
			n = 1;
			continue;
		}

		/* registers that need relocation are in their own group */
		/* find number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) >= nreg)
				break;
			/* keep the block size under the limit */
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}

		/* allocate new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks)
				return -ENOMEM;

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
	}
	return 0;
}

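/*
 * Added note on the tables below: each entry is {offset, flags, sbu_flags}.
 * GROUP_FORCE_NEW_BLOCK entries carry no register; they only stop
 * r600_context_add_block() from merging otherwise-consecutive registers
 * into one block, so that registers needing relocations stay in their own
 * group.
 */
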
/* R600/R700 configuration */
static const struct r600_reg r600_config_reg_list[] = {
	{R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, 0, 0},
	{R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, 0, 0},
	{R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 0, 0},
	{R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1, 0, 0},
	{R_008C04_SQ_GPR_RESOURCE_MGMT_1, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
};

static const struct r600_reg r600_context_reg_list[] = {
	{R_028A4C_PA_SC_MODE_CNTL, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028040_CB_COLOR0_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(0)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A0_CB_COLOR0_INFO, REG_FLAG_NEED_BO, 0},
	{R_028060_CB_COLOR0_SIZE, 0, 0},
	{R_028080_CB_COLOR0_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E0_CB_COLOR0_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C0_CB_COLOR0_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028100_CB_COLOR0_MASK, 0, 0},
	{R_028044_CB_COLOR1_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(1)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A4_CB_COLOR1_INFO, REG_FLAG_NEED_BO, 0},
	{R_028064_CB_COLOR1_SIZE, 0, 0},
	{R_028084_CB_COLOR1_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E4_CB_COLOR1_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C4_CB_COLOR1_TILE, REG_FLAG_NEED_BO, 0},
	{R_028104_CB_COLOR1_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028048_CB_COLOR2_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(2)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A8_CB_COLOR2_INFO, REG_FLAG_NEED_BO, 0},
	{R_028068_CB_COLOR2_SIZE, 0, 0},
	{R_028088_CB_COLOR2_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E8_CB_COLOR2_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C8_CB_COLOR2_TILE, REG_FLAG_NEED_BO, 0},
	{R_028108_CB_COLOR2_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02804C_CB_COLOR3_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(3)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280AC_CB_COLOR3_INFO, REG_FLAG_NEED_BO, 0},
	{R_02806C_CB_COLOR3_SIZE, 0, 0},
	{R_02808C_CB_COLOR3_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280EC_CB_COLOR3_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280CC_CB_COLOR3_TILE, REG_FLAG_NEED_BO, 0},
	{R_02810C_CB_COLOR3_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028050_CB_COLOR4_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(4)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B0_CB_COLOR4_INFO, REG_FLAG_NEED_BO, 0},
	{R_028070_CB_COLOR4_SIZE, 0, 0},
	{R_028090_CB_COLOR4_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F0_CB_COLOR4_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D0_CB_COLOR4_TILE, REG_FLAG_NEED_BO, 0},
	{R_028110_CB_COLOR4_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028054_CB_COLOR5_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(5)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B4_CB_COLOR5_INFO, REG_FLAG_NEED_BO, 0},
	{R_028074_CB_COLOR5_SIZE, 0, 0},
	{R_028094_CB_COLOR5_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F4_CB_COLOR5_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D4_CB_COLOR5_TILE, REG_FLAG_NEED_BO, 0},
	{R_028114_CB_COLOR5_MASK, 0, 0},
	{R_028058_CB_COLOR6_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(6)},
	{R_0280B8_CB_COLOR6_INFO, REG_FLAG_NEED_BO, 0},
	{R_028078_CB_COLOR6_SIZE, 0, 0},
	{R_028098_CB_COLOR6_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F8_CB_COLOR6_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D8_CB_COLOR6_TILE, REG_FLAG_NEED_BO, 0},
	{R_028118_CB_COLOR6_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02805C_CB_COLOR7_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(7)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280BC_CB_COLOR7_INFO, REG_FLAG_NEED_BO, 0},
	{R_02807C_CB_COLOR7_SIZE, 0, 0},
	{R_02809C_CB_COLOR7_VIEW, 0, 0},
	{R_0280FC_CB_COLOR7_FRAG, REG_FLAG_NEED_BO, 0},
	{R_0280DC_CB_COLOR7_TILE, REG_FLAG_NEED_BO, 0},
	{R_02811C_CB_COLOR7_MASK, 0, 0},
	{R_028120_CB_CLEAR_RED, 0, 0},
	{R_028124_CB_CLEAR_GREEN, 0, 0},
	{R_028128_CB_CLEAR_BLUE, 0, 0},
	{R_02812C_CB_CLEAR_ALPHA, 0, 0},
	{R_028424_CB_FOG_RED, 0, 0},
	{R_028428_CB_FOG_GREEN, 0, 0},
	{R_02842C_CB_FOG_BLUE, 0, 0},
	{R_028780_CB_BLEND0_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028784_CB_BLEND1_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028788_CB_BLEND2_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02878C_CB_BLEND3_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028790_CB_BLEND4_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028794_CB_BLEND5_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028798_CB_BLEND6_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02879C_CB_BLEND7_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_0287A0_CB_SHADER_CONTROL, 0, 0},
	{R_028800_DB_DEPTH_CONTROL, 0, 0},
	{R_028804_CB_BLEND_CONTROL, 0, 0},
	{R_02880C_DB_SHADER_CONTROL, 0, 0},
	{R_02800C_DB_DEPTH_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_DEPTH},
	{R_028000_DB_DEPTH_SIZE, 0, 0},
	{R_028004_DB_DEPTH_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028010_DB_DEPTH_INFO, REG_FLAG_NEED_BO, 0},
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	{R_028D34_DB_PREFETCH_LIMIT, 0, 0},
	{R_028D44_DB_ALPHA_TO_MASK, 0, 0},
	{R_028204_PA_SC_WINDOW_SCISSOR_TL, 0, 0},
	{R_028208_PA_SC_WINDOW_SCISSOR_BR, 0, 0},
	{R_028250_PA_SC_VPORT_SCISSOR_0_TL, 0, 0},
	{R_028254_PA_SC_VPORT_SCISSOR_0_BR, 0, 0},
	{R_0286D4_SPI_INTERP_CONTROL_0, 0, 0},
	{R_028814_PA_SU_SC_MODE_CNTL, 0, 0},
	{R_028A00_PA_SU_POINT_SIZE, 0, 0},
	{R_028A04_PA_SU_POINT_MINMAX, 0, 0},
	{R_028A08_PA_SU_LINE_CNTL, 0, 0},
	{R_028C00_PA_SC_LINE_CNTL, 0, 0},
	{R_028C04_PA_SC_AA_CONFIG, 0, 0},
	{R_028C08_PA_SU_VTX_CNTL, 0, 0},
	{R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 0, 0},
	{R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0, 0},
	{R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 0, 0},
	{R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, 0, 0},
	{R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, 0, 0},
	{R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, 0, 0},
	{R_028350_SX_MISC, 0, 0},
	{R_028380_SQ_VTX_SEMANTIC_0, 0, 0},
	{R_028384_SQ_VTX_SEMANTIC_1, 0, 0},
	{R_028388_SQ_VTX_SEMANTIC_2, 0, 0},
	{R_02838C_SQ_VTX_SEMANTIC_3, 0, 0},
	{R_028390_SQ_VTX_SEMANTIC_4, 0, 0},
	{R_028394_SQ_VTX_SEMANTIC_5, 0, 0},
	{R_028398_SQ_VTX_SEMANTIC_6, 0, 0},
	{R_02839C_SQ_VTX_SEMANTIC_7, 0, 0},
	{R_0283A0_SQ_VTX_SEMANTIC_8, 0, 0},
	{R_0283A4_SQ_VTX_SEMANTIC_9, 0, 0},
	{R_0283A8_SQ_VTX_SEMANTIC_10, 0, 0},
	{R_0283AC_SQ_VTX_SEMANTIC_11, 0, 0},
	{R_0283B0_SQ_VTX_SEMANTIC_12, 0, 0},
	{R_0283B4_SQ_VTX_SEMANTIC_13, 0, 0},
	{R_0283B8_SQ_VTX_SEMANTIC_14, 0, 0},
	{R_0283BC_SQ_VTX_SEMANTIC_15, 0, 0},
	{R_0283C0_SQ_VTX_SEMANTIC_16, 0, 0},
	{R_0283C4_SQ_VTX_SEMANTIC_17, 0, 0},
	{R_0283C8_SQ_VTX_SEMANTIC_18, 0, 0},
	{R_0283CC_SQ_VTX_SEMANTIC_19, 0, 0},
	{R_0283D0_SQ_VTX_SEMANTIC_20, 0, 0},
	{R_0283D4_SQ_VTX_SEMANTIC_21, 0, 0},
	{R_0283D8_SQ_VTX_SEMANTIC_22, 0, 0},
	{R_0283DC_SQ_VTX_SEMANTIC_23, 0, 0},
	{R_0283E0_SQ_VTX_SEMANTIC_24, 0, 0},
	{R_0283E4_SQ_VTX_SEMANTIC_25, 0, 0},
	{R_0283E8_SQ_VTX_SEMANTIC_26, 0, 0},
	{R_0283EC_SQ_VTX_SEMANTIC_27, 0, 0},
	{R_0283F0_SQ_VTX_SEMANTIC_28, 0, 0},
	{R_0283F4_SQ_VTX_SEMANTIC_29, 0, 0},
	{R_0283F8_SQ_VTX_SEMANTIC_30, 0, 0},
	{R_0283FC_SQ_VTX_SEMANTIC_31, 0, 0},
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028894_SQ_PGM_START_FS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
	{R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 0, 0},
	{R_028C20_PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, 0, 0},
};

static int r600_loop_const_init(struct r600_context *ctx, uint32_t offset)
{
	unsigned nreg = 32;
	struct r600_reg r600_loop_consts[32];
	int i;

	for (i = 0; i < nreg; i++) {
		r600_loop_consts[i].offset = R600_LOOP_CONST_OFFSET + ((offset + i) * 4);
		r600_loop_consts[i].flags = REG_FLAG_DIRTY_ALWAYS;
		r600_loop_consts[i].sbu_flags = 0;
	}
	return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}

void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0; j < (1 << HASH_SHIFT); j++) {
			block = ctx->range[i].blocks[j];
			if (block) {
				for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
					range = &ctx->range[CTX_RANGE_ID(offset)];
					range->blocks[CTX_BLOCK_ID(offset)] = NULL;
				}
				for (int k = 1; k <= block->nbo; k++) {
					pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
				}
				free(block);
			}
		}
		free(ctx->range[i].blocks);
	}
	free(ctx->blocks);
}

int r600_setup_block_table(struct r600_context *ctx)
{
	/* setup block table */
	int c = 0;

	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;
	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;

			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}
	return 0;
}

int r600_context_init(struct r600_context *ctx)
{
	int r;

	r = r600_context_add_block(ctx, r600_config_reg_list,
				   Elements(r600_config_reg_list), PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
	if (r)
		goto out_err;
	r = r600_context_add_block(ctx, r600_context_reg_list,
				   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
	if (r)
		goto out_err;

	/* PS loop const */
	r600_loop_const_init(ctx, 0);
	/* VS loop const */
	r600_loop_const_init(ctx, 32);

	r = r600_setup_block_table(ctx);
	if (r)
		goto out_err;

	ctx->max_db = 4;
	return 0;
out_err:
	r600_context_fini(ctx);
	return r;
}

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	int i;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->cs->cdw;

	if (count_draw_in) {
		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
			}
		}

		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper-bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
	num_dw += ctx->num_cs_dw_timer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	num_dw += ctx->num_cs_dw_streamout_end;

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* SX_MISC */
	if (ctx->chip_class <= R700) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		r600_flush(&ctx->context, NULL, RADEON_FLUSH_ASYNC);
	}
}

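/*
 * Added note: the sum computed above is a worst-case dword budget, not an
 * exact count; flushing earlier than strictly necessary is always safe,
 * while underestimating would overflow cs->buf. A caller typically
 * reserves space before queuing packets, e.g.:
 *
 *	r600_need_cs_space(ctx, 10, FALSE);
 *	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
 *	...
 */
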
void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			ctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}
}

/**
 * If reg needs a reloc, this function will add it to its block's reloc list.
 * @return true if reg needs a reloc, false otherwise
 */
static bool r600_reg_set_block_reloc(struct r600_pipe_reg *reg)
{
	unsigned reloc_id;

	if (!reg->block->pm4_bo_index[reg->id]) {
		return false;
	}
	/* find relocation */
	reloc_id = reg->block->pm4_bo_index[reg->id];
	pipe_resource_reference(
		(struct pipe_resource**)&reg->block->reloc[reloc_id].bo,
		&reg->bo->b.b);
	reg->block->reloc[reloc_id].bo_usage = reg->bo_usage;
	return true;
}

/**
 * This function will emit all the registers in state directly to the command
 * stream allowing you to bypass the r600_context dirty list.
 *
 * This is used for dispatching compute shaders to avoid mixing compute and
 * 3D states in the context's dirty list.
 *
 * @param pkt_flags Should be either 0 or RADEON_CP_PACKET3_COMPUTE_MODE. This
 * value will be passed on to r600_context_block_emit_dirty and or'd against
 * the PKT3 headers of the registers that are emitted.
 */
void r600_context_pipe_state_emit(struct r600_context *ctx,
				  struct r600_pipe_state *state,
				  unsigned pkt_flags)
{
	unsigned i;

	/* Mark all blocks as dirty:
	 * Since two registers can be in the same block, we need to make sure
	 * we mark all the blocks dirty before we emit any of them. If we were
	 * to mark blocks dirty and emit them in the same loop, like this:
	 *
	 * foreach (reg in state->regs) {
	 *     mark_dirty(reg->block)
	 *     emit_block(reg->block)
	 * }
	 *
	 * then if we have two registers in this state that are in the same
	 * block, we would end up emitting that block twice.
	 */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		/* Mark all the registers in the block as dirty */
		reg->block->nreg_dirty = reg->block->nreg;
		reg->block->status |= R600_BLOCK_STATUS_DIRTY;
		/* Update the reloc for this register if necessary. */
		r600_reg_set_block_reloc(reg);
	}

	/* Emit the register writes */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		if (reg->block->status & R600_BLOCK_STATUS_DIRTY) {
			r600_context_block_emit_dirty(ctx, reg->block, pkt_flags);
		}
	}
}

void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;

	for (int i = 0; i < state->nregs; i++) {
		unsigned id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (r600_reg_set_block_reloc(reg)) {
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}

/**
 * @param pkt_flags should be set to RADEON_CP_PACKET3_COMPUTE_MODE if this
 * block will be used for compute shaders.
 */
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block,
				   unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
						r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;
			}
		}
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);

	/* We are applying the pkt_flags after copying the register block to
	 * the command stream, because it is possible this block will be
	 * emitted with a different pkt_flags, and we don't want to store the
	 * pkt_flags in the block.
	 */
	cs->buf[cs->cdw] |= pkt_flags;
	cs->cdw += cp_dwords;

	if (optional) {
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}

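/*
 * Added note: the "optional" path above emits a truncated packet. Only the
 * two header dwords plus the first nreg_dirty register values are copied,
 * and the PKT3 count field of the copied header is then patched via
 * PKT_COUNT_C/PKT_COUNT_S so it matches the shortened payload.
 */
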
void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->cs;

	if (!rctx->flags) {
		return;
	}

	if (rctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);

		/* DB flushes are special due to errata with hyperz, we need to
		 * insert a no-op, so that the cache has time to really flush.
		 */
		if (rctx->chip_class <= R700 &&
		    rctx->flags & R600_CONTEXT_HTILE_ERRATA) {
			int i;

			/* a PKT3 count of 31 means 32 payload dwords follow */
			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 31, 0);
			for (i = 0; i < 32; i++) {
				cs->buf[cs->cdw++] = 0xdeadcafe;
			}
		}
	}

	if (rctx->flags & (R600_CONTEXT_CB_FLUSH |
			   R600_CONTEXT_DB_FLUSH |
			   R600_CONTEXT_SHADERCONST_FLUSH |
			   R600_CONTEXT_TEX_FLUSH |
			   R600_CONTEXT_VTX_FLUSH |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		/* anything left (cb, vtx, shader, streamout) can be flushed
		 * using the surface sync packet
		 */
		unsigned flags = 0;

		if (rctx->flags & R600_CONTEXT_CB_FLUSH) {
			flags |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

			if (rctx->chip_class >= EVERGREEN) {
				flags |= S_0085F0_CB8_DEST_BASE_ENA(1) |
					 S_0085F0_CB9_DEST_BASE_ENA(1) |
					 S_0085F0_CB10_DEST_BASE_ENA(1) |
					 S_0085F0_CB11_DEST_BASE_ENA(1);
			}

			/* RV670 errata
			 * (CB1_DEST_BASE_ENA is also required, which is
			 * included unconditionally above). */
			if (rctx->family == CHIP_RV670 ||
			    rctx->family == CHIP_RS780 ||
			    rctx->family == CHIP_RS880) {
				flags |= S_0085F0_DEST_BASE_0_ENA(1);
			}
		}

		if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
			flags |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				 S_0085F0_SO1_DEST_BASE_ENA(1) |
				 S_0085F0_SO2_DEST_BASE_ENA(1) |
				 S_0085F0_SO3_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);

			/* RV670 errata */
			if (rctx->family == CHIP_RV670 ||
			    rctx->family == CHIP_RS780 ||
			    rctx->family == CHIP_RS880) {
				flags |= S_0085F0_DEST_BASE_0_ENA(1);
			}
		}

		flags |= (rctx->flags & R600_CONTEXT_DB_FLUSH) ? S_0085F0_DB_ACTION_ENA(1) |
								 S_0085F0_DB_DEST_BASE_ENA(1) : 0;
		flags |= (rctx->flags & R600_CONTEXT_SHADERCONST_FLUSH) ? S_0085F0_SH_ACTION_ENA(1) : 0;
		flags |= (rctx->flags & R600_CONTEXT_TEX_FLUSH) ? S_0085F0_TC_ACTION_ENA(1) : 0;
		flags |= (rctx->flags & R600_CONTEXT_VTX_FLUSH) ? S_0085F0_VC_ACTION_ENA(1) : 0;

		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = flags;      /* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff; /* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;          /* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A; /* POLL_INTERVAL */
	}

	if (rctx->flags & R600_CONTEXT_WAIT_IDLE) {
		/* wait for things to settle */
		r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	}

	/* everything is properly flushed */
	rctx->flags = 0;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (cs->cdw == ctx->start_cs_cmd.atom.num_dw)
		return;

	ctx->timer_queries_suspended = false;
	ctx->nontimer_queries_suspended = false;
	ctx->streamout_suspended = false;

	/* suspend queries */
	if (ctx->num_cs_dw_timer_queries_suspend) {
		r600_suspend_timer_queries(ctx);
		ctx->timer_queries_suspended = true;
	}
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	if (ctx->num_cs_dw_streamout_end) {
		r600_context_streamout_end(ctx);
		ctx->streamout_suspended = true;
	}

	/* partial flush is needed to avoid lockups on some chips with user fences */
	ctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;

	/* flush the framebuffer */
	ctx->flags |= R600_CONTEXT_CB_FLUSH | R600_CONTEXT_DB_FLUSH;

	/* R6xx errata */
	if (ctx->chip_class == R600) {
		ctx->flags |= R600_CONTEXT_FLUSH_AND_INV;
	}

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	/* Flush the CS. */
	ctx->ws->cs_flush(ctx->cs, flags);

	r600_begin_new_cs(ctx);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
	struct r600_block *enable_block = NULL;
	unsigned shader;

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	/* Begin a new CS. */
	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	/* Re-emit states. */
	r600_atom_dirty(ctx, &ctx->alphatest_state.atom);
	r600_atom_dirty(ctx, &ctx->blend_color.atom);
	r600_atom_dirty(ctx, &ctx->cb_misc_state.atom);
	r600_atom_dirty(ctx, &ctx->clip_misc_state.atom);
	r600_atom_dirty(ctx, &ctx->clip_state.atom);
	r600_atom_dirty(ctx, &ctx->db_misc_state.atom);
	r600_atom_dirty(ctx, &ctx->vgt_state.atom);
	r600_atom_dirty(ctx, &ctx->vgt2_state.atom);
	r600_atom_dirty(ctx, &ctx->sample_mask.atom);
	r600_atom_dirty(ctx, &ctx->stencil_ref.atom);
	r600_atom_dirty(ctx, &ctx->viewport.atom);

	if (ctx->chip_class <= R700) {
		r600_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	if (ctx->streamout_suspended) {
		ctx->streamout_start = TRUE;
		ctx->streamout_append_bitmask = ~0;
	}

	/* resume queries */
	if (ctx->timer_queries_suspended) {
		r600_resume_timer_queries(ctx);
	}
	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* Set all valid blocks as dirty so they get re-emitted on
	 * the next draw command.
	 */
	LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
		if (!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
			LIST_ADDTAIL(&enable_block->list, &ctx->dirty);
			enable_block->status |= R600_BLOCK_STATUS_DIRTY;
		}
		ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
		enable_block->nreg_dirty = enable_block->nreg;
	}

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	ctx->flags &= ~R600_CONTEXT_PS_PARTIAL_FLUSH;
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value; /* DATA_LO */
	cs->buf[cs->cdw++] = 0;     /* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, fence_bo, RADEON_USAGE_WRITE);
}

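/*
 * Added note: EVENT_WRITE_EOP with CACHE_FLUSH_AND_INV_TS asks the CP to
 * write the fence value to `va` only after the pipeline has drained and
 * caches are flushed. In the dword built above, DATA_SEL = 1 (bit 29)
 * selects a 32-bit data write and INT_EN = 0 (bit 24) leaves the
 * completion interrupt disabled, matching the (1 << 29) | (0 << 24)
 * expression.
 */
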
static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, 1, 0);
	cs->buf[cs->cdw++] = (R_008490_CP_STRMOUT_CNTL - R600_CONFIG_REG_OFFSET) >> 2;
	cs->buf[cs->cdw++] = 0;

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
	cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2; /* register */
	cs->buf[cs->cdw++] = 0;
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
	cs->buf[cs->cdw++] = 4; /* poll interval */
}

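/*
 * Added note: the WAIT_REG_MEM packet above makes the CP poll
 * CP_STRMOUT_CNTL (at the interval given in the last dword) until
 * (register & mask) == reference, i.e. until the VGT has acknowledged the
 * SO_VGTSTREAMOUT_FLUSH event by setting OFFSET_UPDATE_DONE.
 */
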
static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (buffer_enable_bit) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(1);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028B20_VGT_STRMOUT_BUFFER_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = buffer_enable_bit;
	} else {
		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(0);
	}
}

void r600_context_streamout_begin(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned *stride_in_dw = ctx->vs_shader->so.stride;
	unsigned buffer_en, i, update_flags = 0;
	uint64_t va = 0;

	buffer_en = (ctx->num_so_targets >= 1 && t[0] ? 1 : 0) |
		    (ctx->num_so_targets >= 2 && t[1] ? 2 : 0) |
		    (ctx->num_so_targets >= 3 && t[2] ? 4 : 0) |
		    (ctx->num_so_targets >= 4 && t[3] ? 8 : 0);

	ctx->num_cs_dw_streamout_end =
		12 + /* flush_vgt_streamout */
		util_bitcount(buffer_en) * 8 + /* STRMOUT_BUFFER_UPDATE */
		3 /* set_streamout_enable(0) */;

	r600_need_cs_space(ctx,
			   12 + /* flush_vgt_streamout */
			   6 + /* set_streamout_enable */
			   util_bitcount(buffer_en) * 7 + /* SET_CONTEXT_REG */
			   (ctx->chip_class == R700 ? util_bitcount(buffer_en) * 5 : 0) + /* STRMOUT_BASE_UPDATE */
			   util_bitcount(buffer_en & ctx->streamout_append_bitmask) * 8 + /* STRMOUT_BUFFER_UPDATE */
			   util_bitcount(buffer_en & ~ctx->streamout_append_bitmask) * 6 + /* STRMOUT_BUFFER_UPDATE */
			   (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770 ? 2 : 0) + /* SURFACE_BASE_UPDATE */
			   ctx->num_cs_dw_streamout_end, TRUE);

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
		evergreen_set_streamout_enable(ctx, buffer_en);
	} else {
		r600_flush_vgt_streamout(ctx);
		r600_set_streamout_enable(ctx, buffer_en);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			t[i]->stride_in_dw = stride_in_dw[i];

			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->b.buffer);

			update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

			cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 3, 0);
			cs->buf[cs->cdw++] = (R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 +
					      16*i - R600_CONTEXT_REG_OFFSET) >> 2;
			cs->buf[cs->cdw++] = (t[i]->b.buffer_offset +
					      t[i]->b.buffer_size) >> 2; /* BUFFER_SIZE (in DW) */
			cs->buf[cs->cdw++] = stride_in_dw[i]; /* VTX_STRIDE (in DW) */
			cs->buf[cs->cdw++] = va >> 8;         /* BUFFER_BASE */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
						      RADEON_USAGE_WRITE);

			/* R7xx requires this packet after updating BUFFER_BASE.
			 * Without this, R7xx locks up. */
			if (ctx->chip_class == R700) {
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
				cs->buf[cs->cdw++] = i;
				cs->buf[cs->cdw++] = va >> 8;

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
							      RADEON_USAGE_WRITE);
			}

			if (ctx->streamout_append_bitmask & (1 << i)) {
				va = r600_resource_va(&ctx->screen->screen,
						      (void*)t[i]->filled_size);
				/* Append. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
				cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, t[i]->filled_size,
							      RADEON_USAGE_READ);
			} else {
				/* Start from the beginning. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
				cs->buf[cs->cdw++] = 0; /* unused */
			}
		}
	}

	if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
		cs->buf[cs->cdw++] = update_flags;
	}
}

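/*
 * Added note: streamout begin/end cooperate through t[i]->filled_size. On
 * append, STRMOUT_OFFSET_FROM_MEM reloads the write offset that a previous
 * r600_context_streamout_end() stored with STRMOUT_STORE_BUFFER_FILLED_SIZE,
 * so targets survive a CS flush (r600_begin_new_cs() sets
 * streamout_append_bitmask = ~0 for exactly this reason).
 */
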
void r600_context_streamout_end(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned i;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
	} else {
		r600_flush_vgt_streamout(ctx);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->filled_size);
			cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
			cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
					     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
					     STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
			cs->buf[cs->cdw++] = 0; /* unused */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, t[i]->filled_size,
						      RADEON_USAGE_WRITE);
		}
	}

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_set_streamout_enable(ctx, 0);
	} else {
		r600_set_streamout_enable(ctx, 0);
	}
	ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;

	/* R6xx errata */
	if (ctx->chip_class == R600) {
		ctx->flags |= R600_CONTEXT_FLUSH_AND_INV;
	}
	ctx->num_cs_dw_streamout_end = 0;
}