/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_hw_context_priv.h"
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#define GROUP_FORCE_NEW_BLOCK	0
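/* Note: GROUP_FORCE_NEW_BLOCK is a sentinel offset (0 is never a real
 * register offset in the tables below); r600_context_add_block() treats a
 * table entry with this offset as a request to end the current register
 * block and start a new one, so registers that carry relocations stay in
 * their own group. */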
/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
}
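/* Worked example of the backend_map decoding above (illustrative numbers,
 * not taken from real hardware): with item_width = 2, item_mask = 0x3 and
 * backend_map = 0xe (0b1110), the first iteration reads backend index 2
 * (0b10) and the second index 3 (0b11), giving mask = (1<<2)|(1<<3) = 0xc.
 * Each tile pipe names the backend that serves it; or-ing 1<<i over all
 * pipes yields the set of enabled backends. */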
static inline void r600_context_ps_partial_flush(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (!(ctx->flags & R600_CONTEXT_DRAW_PENDING))
		return;

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

	ctx->flags &= ~R600_CONTEXT_DRAW_PENDING;
}
void r600_init_cs(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	/* R6xx requires this packet at the start of each command buffer */
	if (ctx->family < CHIP_RV770) {
		cs->buf[cs->cdw++] = PKT3(PKT3_START_3D_CMDBUF, 0, 0);
		cs->buf[cs->cdw++] = 0x00000000;
	}

	/* All asics require this one */
	cs->buf[cs->cdw++] = PKT3(PKT3_CONTEXT_CONTROL, 1, 0);
	cs->buf[cs->cdw++] = 0x80000000;
	cs->buf[cs->cdw++] = 0x80000000;

	ctx->init_dwords = cs->cdw;
}
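/* A note on the PKT3() encoding used throughout this file: a type-3 PM4
 * packet header carries the opcode and a payload dword count biased by
 * one, so PKT3(PKT3_CONTEXT_CONTROL, 1, 0) above is followed by exactly
 * two payload dwords. The two 0x80000000 values enable loading of the
 * register ranges; see r600d.h for the exact bit layout. */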
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	if (opcode == PKT3_SET_RESOURCE) {
		block->flags = BLOCK_FLAG_RESOURCE;
		block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY; /* dirty all blocks at start */
	} else {
		block->flags = 0;
		block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	}
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list, &ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
		if ((ctx->family > CHIP_R600) &&
		    (ctx->family < CHIP_RV770) && reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
			block->pm4[block->pm4_ndwords++] = reg[i+j].sbu_flags;
		}
	}
	/* check that we stay in limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}
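/* Layout of the pm4 shadow built above, for reference:
 *
 *   pm4[0]        PKT3(opcode, n, 0) header
 *   pm4[1]        (start_offset - offset_base) >> 2
 *   pm4[2..n+1]   register values (block->reg points here)
 *
 * followed, for each REG_FLAG_NEED_BO register, by a PKT3(PKT3_NOP, 0, 0)
 * plus one dword that the emit paths later patch with the relocation, and,
 * for RV6xx SBU registers, by PKT3_SURFACE_BASE_UPDATE + sbu_flags. */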
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* ignore new block marker */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* ignore regs not on R600 on R600 */
		if ((reg[i].flags & REG_FLAG_NOT_R600) && ctx->family == CHIP_R600) {
			n = 1;
			continue;
		}

		/* register that need relocation are in their own group */
		/* find number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) == nreg)
				break;
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}

		/* allocate new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL)
			return -ENOMEM;

		ctx->nblocks++;
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks)
				return -ENOMEM;

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
	}
	return 0;
}
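/* CTX_RANGE_ID()/CTX_BLOCK_ID() (defined in the private context header)
 * split a register offset into a range index and a block index within that
 * range's (1 << HASH_SHIFT)-entry table; every register of a block gets an
 * entry above, so any offset inside the block maps back to it. */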
/* R600/R700 configuration */
static const struct r600_reg r600_config_reg_list[] = {
	{R_008958_VGT_PRIMITIVE_TYPE, 0, 0},
	{R_008C00_SQ_CONFIG, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008C04_SQ_GPR_RESOURCE_MGMT_1, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008C08_SQ_GPR_RESOURCE_MGMT_2, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008C0C_SQ_THREAD_RESOURCE_MGMT, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008C10_SQ_STACK_RESOURCE_MGMT_1, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008C14_SQ_STACK_RESOURCE_MGMT_2, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_009508_TA_CNTL_AUX, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_009714_VC_ENHANCE, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_009830_DB_DEBUG, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_009838_DB_WATERMARKS, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
};
static const struct r600_reg r600_ctl_const_list[] = {
	{R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0},
	{R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0},
};
static const struct r600_reg r600_context_reg_list[] = {
	{R_028350_SX_MISC, 0, 0},
	{R_0286C8_SPI_THREAD_GROUPING, 0, 0},
	{R_0288A8_SQ_ESGS_RING_ITEMSIZE, 0, 0},
	{R_0288AC_SQ_GSVS_RING_ITEMSIZE, 0, 0},
	{R_0288B0_SQ_ESTMP_RING_ITEMSIZE, 0, 0},
	{R_0288B4_SQ_GSTMP_RING_ITEMSIZE, 0, 0},
	{R_0288B8_SQ_VSTMP_RING_ITEMSIZE, 0, 0},
	{R_0288BC_SQ_PSTMP_RING_ITEMSIZE, 0, 0},
	{R_0288C0_SQ_FBUF_RING_ITEMSIZE, 0, 0},
	{R_0288C4_SQ_REDUC_RING_ITEMSIZE, 0, 0},
	{R_0288C8_SQ_GS_VERT_ITEMSIZE, 0, 0},
	{R_028A10_VGT_OUTPUT_PATH_CNTL, 0, 0},
	{R_028A14_VGT_HOS_CNTL, 0, 0},
	{R_028A18_VGT_HOS_MAX_TESS_LEVEL, 0, 0},
	{R_028A1C_VGT_HOS_MIN_TESS_LEVEL, 0, 0},
	{R_028A20_VGT_HOS_REUSE_DEPTH, 0, 0},
	{R_028A24_VGT_GROUP_PRIM_TYPE, 0, 0},
	{R_028A28_VGT_GROUP_FIRST_DECR, 0, 0},
	{R_028A2C_VGT_GROUP_DECR, 0, 0},
	{R_028A30_VGT_GROUP_VECT_0_CNTL, 0, 0},
	{R_028A34_VGT_GROUP_VECT_1_CNTL, 0, 0},
	{R_028A38_VGT_GROUP_VECT_0_FMT_CNTL, 0, 0},
	{R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL, 0, 0},
	{R_028A40_VGT_GS_MODE, 0, 0},
	{R_028A4C_PA_SC_MODE_CNTL, 0, 0},
	{R_028AB0_VGT_STRMOUT_EN, 0, 0},
	{R_028AB4_VGT_REUSE_OFF, 0, 0},
	{R_028AB8_VGT_VTX_CNT_EN, 0, 0},
	{R_028B20_VGT_STRMOUT_BUFFER_EN, 0, 0},
	{R_028028_DB_STENCIL_CLEAR, 0, 0},
	{R_02802C_DB_DEPTH_CLEAR, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028040_CB_COLOR0_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(0)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A0_CB_COLOR0_INFO, REG_FLAG_NEED_BO, 0},
	{R_028060_CB_COLOR0_SIZE, 0, 0},
	{R_028080_CB_COLOR0_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E0_CB_COLOR0_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C0_CB_COLOR0_TILE, REG_FLAG_NEED_BO, 0},
	{R_028100_CB_COLOR0_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028044_CB_COLOR1_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(1)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A4_CB_COLOR1_INFO, REG_FLAG_NEED_BO, 0},
	{R_028064_CB_COLOR1_SIZE, 0, 0},
	{R_028084_CB_COLOR1_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E4_CB_COLOR1_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C4_CB_COLOR1_TILE, REG_FLAG_NEED_BO, 0},
	{R_028104_CB_COLOR1_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028048_CB_COLOR2_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(2)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A8_CB_COLOR2_INFO, REG_FLAG_NEED_BO, 0},
	{R_028068_CB_COLOR2_SIZE, 0, 0},
	{R_028088_CB_COLOR2_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E8_CB_COLOR2_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C8_CB_COLOR2_TILE, REG_FLAG_NEED_BO, 0},
	{R_028108_CB_COLOR2_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02804C_CB_COLOR3_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(3)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280AC_CB_COLOR3_INFO, REG_FLAG_NEED_BO, 0},
	{R_02806C_CB_COLOR3_SIZE, 0, 0},
	{R_02808C_CB_COLOR3_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280EC_CB_COLOR3_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280CC_CB_COLOR3_TILE, REG_FLAG_NEED_BO, 0},
	{R_02810C_CB_COLOR3_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028050_CB_COLOR4_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(4)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B0_CB_COLOR4_INFO, REG_FLAG_NEED_BO, 0},
	{R_028070_CB_COLOR4_SIZE, 0, 0},
	{R_028090_CB_COLOR4_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F0_CB_COLOR4_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D0_CB_COLOR4_TILE, REG_FLAG_NEED_BO, 0},
	{R_028110_CB_COLOR4_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028054_CB_COLOR5_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(5)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B4_CB_COLOR5_INFO, REG_FLAG_NEED_BO, 0},
	{R_028074_CB_COLOR5_SIZE, 0, 0},
	{R_028094_CB_COLOR5_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F4_CB_COLOR5_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D4_CB_COLOR5_TILE, REG_FLAG_NEED_BO, 0},
	{R_028114_CB_COLOR5_MASK, 0, 0},
	{R_028058_CB_COLOR6_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(6)},
	{R_0280B8_CB_COLOR6_INFO, REG_FLAG_NEED_BO, 0},
	{R_028078_CB_COLOR6_SIZE, 0, 0},
	{R_028098_CB_COLOR6_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F8_CB_COLOR6_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D8_CB_COLOR6_TILE, REG_FLAG_NEED_BO, 0},
	{R_028118_CB_COLOR6_MASK, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02805C_CB_COLOR7_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(7)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280BC_CB_COLOR7_INFO, REG_FLAG_NEED_BO, 0},
	{R_02807C_CB_COLOR7_SIZE, 0, 0},
	{R_02809C_CB_COLOR7_VIEW, 0, 0},
	{R_0280FC_CB_COLOR7_FRAG, REG_FLAG_NEED_BO, 0},
	{R_0280DC_CB_COLOR7_TILE, REG_FLAG_NEED_BO, 0},
	{R_02811C_CB_COLOR7_MASK, 0, 0},
	{R_028120_CB_CLEAR_RED, 0, 0},
	{R_028124_CB_CLEAR_GREEN, 0, 0},
	{R_028128_CB_CLEAR_BLUE, 0, 0},
	{R_02812C_CB_CLEAR_ALPHA, 0, 0},
	{R_028140_ALU_CONST_BUFFER_SIZE_PS_0, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028144_ALU_CONST_BUFFER_SIZE_PS_1, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028180_ALU_CONST_BUFFER_SIZE_VS_0, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028184_ALU_CONST_BUFFER_SIZE_VS_1, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028940_ALU_CONST_CACHE_PS_0, REG_FLAG_NEED_BO, 0},
	{R_028944_ALU_CONST_CACHE_PS_1, REG_FLAG_NEED_BO, 0},
	{R_028980_ALU_CONST_CACHE_VS_0, REG_FLAG_NEED_BO, 0},
	{R_028984_ALU_CONST_CACHE_VS_1, REG_FLAG_NEED_BO, 0},
	{R_02823C_CB_SHADER_MASK, 0, 0},
	{R_028238_CB_TARGET_MASK, 0, 0},
	{R_028410_SX_ALPHA_TEST_CONTROL, 0, 0},
	{R_028414_CB_BLEND_RED, 0, 0},
	{R_028418_CB_BLEND_GREEN, 0, 0},
	{R_02841C_CB_BLEND_BLUE, 0, 0},
	{R_028420_CB_BLEND_ALPHA, 0, 0},
	{R_028424_CB_FOG_RED, 0, 0},
	{R_028428_CB_FOG_GREEN, 0, 0},
	{R_02842C_CB_FOG_BLUE, 0, 0},
	{R_028430_DB_STENCILREFMASK, 0, 0},
	{R_028434_DB_STENCILREFMASK_BF, 0, 0},
	{R_028438_SX_ALPHA_REF, 0, 0},
	{R_0286DC_SPI_FOG_CNTL, 0, 0},
	{R_0286E0_SPI_FOG_FUNC_SCALE, 0, 0},
	{R_0286E4_SPI_FOG_FUNC_BIAS, 0, 0},
	{R_028780_CB_BLEND0_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028784_CB_BLEND1_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028788_CB_BLEND2_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02878C_CB_BLEND3_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028790_CB_BLEND4_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028794_CB_BLEND5_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028798_CB_BLEND6_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02879C_CB_BLEND7_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_0287A0_CB_SHADER_CONTROL, 0, 0},
	{R_028800_DB_DEPTH_CONTROL, 0, 0},
	{R_028804_CB_BLEND_CONTROL, 0, 0},
	{R_028808_CB_COLOR_CONTROL, 0, 0},
	{R_02880C_DB_SHADER_CONTROL, 0, 0},
	{R_028C04_PA_SC_AA_CONFIG, 0, 0},
	{R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 0, 0},
	{R_028C20_PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, 0, 0},
	{R_028C30_CB_CLRCMP_CONTROL, 0, 0},
	{R_028C34_CB_CLRCMP_SRC, 0, 0},
	{R_028C38_CB_CLRCMP_DST, 0, 0},
	{R_028C3C_CB_CLRCMP_MSK, 0, 0},
	{R_028C48_PA_SC_AA_MASK, 0, 0},
	{R_028D2C_DB_SRESULTS_COMPARE_STATE1, 0, 0},
	{R_028D44_DB_ALPHA_TO_MASK, 0, 0},
	{R_02800C_DB_DEPTH_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_DEPTH},
	{R_028000_DB_DEPTH_SIZE, 0, 0},
	{R_028004_DB_DEPTH_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028010_DB_DEPTH_INFO, REG_FLAG_NEED_BO, 0},
	{R_028D0C_DB_RENDER_CONTROL, 0, 0},
	{R_028D10_DB_RENDER_OVERRIDE, 0, 0},
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	{R_028D30_DB_PRELOAD_CONTROL, 0, 0},
	{R_028D34_DB_PREFETCH_LIMIT, 0, 0},
	{R_028030_PA_SC_SCREEN_SCISSOR_TL, 0, 0},
	{R_028034_PA_SC_SCREEN_SCISSOR_BR, 0, 0},
	{R_028200_PA_SC_WINDOW_OFFSET, 0, 0},
	{R_028204_PA_SC_WINDOW_SCISSOR_TL, 0, 0},
	{R_028208_PA_SC_WINDOW_SCISSOR_BR, 0, 0},
	{R_02820C_PA_SC_CLIPRECT_RULE, 0, 0},
	{R_028210_PA_SC_CLIPRECT_0_TL, 0, 0},
	{R_028214_PA_SC_CLIPRECT_0_BR, 0, 0},
	{R_028218_PA_SC_CLIPRECT_1_TL, 0, 0},
	{R_02821C_PA_SC_CLIPRECT_1_BR, 0, 0},
	{R_028220_PA_SC_CLIPRECT_2_TL, 0, 0},
	{R_028224_PA_SC_CLIPRECT_2_BR, 0, 0},
	{R_028228_PA_SC_CLIPRECT_3_TL, 0, 0},
	{R_02822C_PA_SC_CLIPRECT_3_BR, 0, 0},
	{R_028230_PA_SC_EDGERULE, 0, 0},
	{R_028240_PA_SC_GENERIC_SCISSOR_TL, 0, 0},
	{R_028244_PA_SC_GENERIC_SCISSOR_BR, 0, 0},
	{R_028250_PA_SC_VPORT_SCISSOR_0_TL, 0, 0},
	{R_028254_PA_SC_VPORT_SCISSOR_0_BR, 0, 0},
	{R_0282D0_PA_SC_VPORT_ZMIN_0, 0, 0},
	{R_0282D4_PA_SC_VPORT_ZMAX_0, 0, 0},
	{R_02843C_PA_CL_VPORT_XSCALE_0, 0, 0},
	{R_028440_PA_CL_VPORT_XOFFSET_0, 0, 0},
	{R_028444_PA_CL_VPORT_YSCALE_0, 0, 0},
	{R_028448_PA_CL_VPORT_YOFFSET_0, 0, 0},
	{R_02844C_PA_CL_VPORT_ZSCALE_0, 0, 0},
	{R_028450_PA_CL_VPORT_ZOFFSET_0, 0, 0},
	{R_0286D4_SPI_INTERP_CONTROL_0, 0, 0},
	{R_028810_PA_CL_CLIP_CNTL, 0, 0},
	{R_028814_PA_SU_SC_MODE_CNTL, 0, 0},
	{R_028818_PA_CL_VTE_CNTL, 0, 0},
	{R_02881C_PA_CL_VS_OUT_CNTL, 0, 0},
	{R_028820_PA_CL_NANINF_CNTL, 0, 0},
	{R_028A00_PA_SU_POINT_SIZE, 0, 0},
	{R_028A04_PA_SU_POINT_MINMAX, 0, 0},
	{R_028A08_PA_SU_LINE_CNTL, 0, 0},
	{R_028A0C_PA_SC_LINE_STIPPLE, 0, 0},
	{R_028A48_PA_SC_MPASS_PS_CNTL, 0, 0},
	{R_028C00_PA_SC_LINE_CNTL, 0, 0},
	{R_028C08_PA_SU_VTX_CNTL, 0, 0},
	{R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 0, 0},
	{R_028C10_PA_CL_GB_VERT_DISC_ADJ, 0, 0},
	{R_028C14_PA_CL_GB_HORZ_CLIP_ADJ, 0, 0},
	{R_028C18_PA_CL_GB_HORZ_DISC_ADJ, 0, 0},
	{R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 0, 0},
	{R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0, 0},
	{R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 0, 0},
	{R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, 0, 0},
	{R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, 0, 0},
	{R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, 0, 0},
	{R_028E20_PA_CL_UCP0_X, 0, 0},
	{R_028E24_PA_CL_UCP0_Y, 0, 0},
	{R_028E28_PA_CL_UCP0_Z, 0, 0},
	{R_028E2C_PA_CL_UCP0_W, 0, 0},
	{R_028E30_PA_CL_UCP1_X, 0, 0},
	{R_028E34_PA_CL_UCP1_Y, 0, 0},
	{R_028E38_PA_CL_UCP1_Z, 0, 0},
	{R_028E3C_PA_CL_UCP1_W, 0, 0},
	{R_028E40_PA_CL_UCP2_X, 0, 0},
	{R_028E44_PA_CL_UCP2_Y, 0, 0},
	{R_028E48_PA_CL_UCP2_Z, 0, 0},
	{R_028E4C_PA_CL_UCP2_W, 0, 0},
	{R_028E50_PA_CL_UCP3_X, 0, 0},
	{R_028E54_PA_CL_UCP3_Y, 0, 0},
	{R_028E58_PA_CL_UCP3_Z, 0, 0},
	{R_028E5C_PA_CL_UCP3_W, 0, 0},
	{R_028E60_PA_CL_UCP4_X, 0, 0},
	{R_028E64_PA_CL_UCP4_Y, 0, 0},
	{R_028E68_PA_CL_UCP4_Z, 0, 0},
	{R_028E6C_PA_CL_UCP4_W, 0, 0},
	{R_028E70_PA_CL_UCP5_X, 0, 0},
	{R_028E74_PA_CL_UCP5_Y, 0, 0},
	{R_028E78_PA_CL_UCP5_Z, 0, 0},
	{R_028E7C_PA_CL_UCP5_W, 0, 0},
	{R_028380_SQ_VTX_SEMANTIC_0, 0, 0},
	{R_028384_SQ_VTX_SEMANTIC_1, 0, 0},
	{R_028388_SQ_VTX_SEMANTIC_2, 0, 0},
	{R_02838C_SQ_VTX_SEMANTIC_3, 0, 0},
	{R_028390_SQ_VTX_SEMANTIC_4, 0, 0},
	{R_028394_SQ_VTX_SEMANTIC_5, 0, 0},
	{R_028398_SQ_VTX_SEMANTIC_6, 0, 0},
	{R_02839C_SQ_VTX_SEMANTIC_7, 0, 0},
	{R_0283A0_SQ_VTX_SEMANTIC_8, 0, 0},
	{R_0283A4_SQ_VTX_SEMANTIC_9, 0, 0},
	{R_0283A8_SQ_VTX_SEMANTIC_10, 0, 0},
	{R_0283AC_SQ_VTX_SEMANTIC_11, 0, 0},
	{R_0283B0_SQ_VTX_SEMANTIC_12, 0, 0},
	{R_0283B4_SQ_VTX_SEMANTIC_13, 0, 0},
	{R_0283B8_SQ_VTX_SEMANTIC_14, 0, 0},
	{R_0283BC_SQ_VTX_SEMANTIC_15, 0, 0},
	{R_0283C0_SQ_VTX_SEMANTIC_16, 0, 0},
	{R_0283C4_SQ_VTX_SEMANTIC_17, 0, 0},
	{R_0283C8_SQ_VTX_SEMANTIC_18, 0, 0},
	{R_0283CC_SQ_VTX_SEMANTIC_19, 0, 0},
	{R_0283D0_SQ_VTX_SEMANTIC_20, 0, 0},
	{R_0283D4_SQ_VTX_SEMANTIC_21, 0, 0},
	{R_0283D8_SQ_VTX_SEMANTIC_22, 0, 0},
	{R_0283DC_SQ_VTX_SEMANTIC_23, 0, 0},
	{R_0283E0_SQ_VTX_SEMANTIC_24, 0, 0},
	{R_0283E4_SQ_VTX_SEMANTIC_25, 0, 0},
	{R_0283E8_SQ_VTX_SEMANTIC_26, 0, 0},
	{R_0283EC_SQ_VTX_SEMANTIC_27, 0, 0},
	{R_0283F0_SQ_VTX_SEMANTIC_28, 0, 0},
	{R_0283F4_SQ_VTX_SEMANTIC_29, 0, 0},
	{R_0283F8_SQ_VTX_SEMANTIC_30, 0, 0},
	{R_0283FC_SQ_VTX_SEMANTIC_31, 0, 0},
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028894_SQ_PGM_START_FS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288D0_SQ_PGM_CF_OFFSET_VS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
	{R_0288CC_SQ_PGM_CF_OFFSET_PS, 0, 0},
	{R_028400_VGT_MAX_VTX_INDX, 0, 0},
	{R_028404_VGT_MIN_VTX_INDX, 0, 0},
	{R_028408_VGT_INDX_OFFSET, 0, 0},
	{R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0, 0},
	{R_028A84_VGT_PRIMITIVEID_EN, 0, 0},
	{R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 0, 0},
	{R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0, 0},
	{R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0, 0},
};
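/* The GROUP_FORCE_NEW_BLOCK entries interleaved above keep each
 * relocation-carrying register (CB_COLORn_BASE/INFO/FRAG/TILE, DB_DEPTH_*,
 * SQ_PGM_START_*) in a block of its own, so its relocation can be tracked
 * and re-emitted independently of the plain state registers around it. */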
/* SHADER RESOURCE R600/R700 */
int r600_resource_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride, struct r600_reg *reg, int nreg, unsigned offset_base)
{
	int i;
	struct r600_block *block;

	range->blocks = calloc(nblocks, sizeof(struct r600_block *));
	if (range->blocks == NULL)
		return -ENOMEM;

	reg[0].offset += offset;
	for (i = 0; i < nblocks; i++) {
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		range->blocks[i] = block;
		r600_init_block(ctx, block, reg, 0, nreg, PKT3_SET_RESOURCE, offset_base);

		reg[0].offset += stride;
	}
	return 0;
}
static int r600_resource_range_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride)
{
	struct r600_reg r600_shader_resource[] = {
		{R_038000_RESOURCE0_WORD0, REG_FLAG_NEED_BO, 0},
		{R_038004_RESOURCE0_WORD1, REG_FLAG_NEED_BO, 0},
		{R_038008_RESOURCE0_WORD2, 0, 0},
		{R_03800C_RESOURCE0_WORD3, 0, 0},
		{R_038010_RESOURCE0_WORD4, 0, 0},
		{R_038014_RESOURCE0_WORD5, 0, 0},
		{R_038018_RESOURCE0_WORD6, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_resource);

	return r600_resource_init(ctx, range, offset, nblocks, stride, r600_shader_resource, nreg, R600_RESOURCE_OFFSET);
}
/* SHADER SAMPLER R600/R700 */
static int r600_state_sampler_init(struct r600_context *ctx, uint32_t offset)
{
	struct r600_reg r600_shader_sampler[] = {
		{R_03C000_SQ_TEX_SAMPLER_WORD0_0, 0, 0},
		{R_03C004_SQ_TEX_SAMPLER_WORD1_0, 0, 0},
		{R_03C008_SQ_TEX_SAMPLER_WORD2_0, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_sampler);

	for (int i = 0; i < nreg; i++) {
		r600_shader_sampler[i].offset += offset;
	}
	return r600_context_add_block(ctx, r600_shader_sampler, nreg, PKT3_SET_SAMPLER, R600_SAMPLER_OFFSET);
}
/* SHADER SAMPLER BORDER R600/R700 */
static int r600_state_sampler_border_init(struct r600_context *ctx, uint32_t offset)
{
	struct r600_reg r600_shader_sampler_border[] = {
		{R_00A400_TD_PS_SAMPLER0_BORDER_RED, 0, 0},
		{R_00A404_TD_PS_SAMPLER0_BORDER_GREEN, 0, 0},
		{R_00A408_TD_PS_SAMPLER0_BORDER_BLUE, 0, 0},
		{R_00A40C_TD_PS_SAMPLER0_BORDER_ALPHA, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_sampler_border);

	for (int i = 0; i < nreg; i++) {
		r600_shader_sampler_border[i].offset += offset;
	}
	return r600_context_add_block(ctx, r600_shader_sampler_border, nreg, PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
}
static int r600_loop_const_init(struct r600_context *ctx, uint32_t offset)
{
	unsigned nreg = 32;
	struct r600_reg r600_loop_consts[32];
	int i;

	for (i = 0; i < nreg; i++) {
		r600_loop_consts[i].offset = R600_LOOP_CONST_OFFSET + ((offset + i) * 4);
		r600_loop_consts[i].flags = REG_FLAG_DIRTY_ALWAYS;
		r600_loop_consts[i].sbu_flags = 0;
	}
	return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}
static void r600_free_resource_range(struct r600_context *ctx, struct r600_range *range, int nblocks)
{
	struct r600_block *block;
	int i;

	for (i = 0; i < nblocks; i++) {
		block = range->blocks[i];
		if (block) {
			for (int k = 1; k <= block->nbo; k++)
				pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);

			free(block);
		}
	}
	free(range->blocks);
}
void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0; j < (1 << HASH_SHIFT); j++) {
			block = ctx->range[i].blocks[j];
			if (block) {
				for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
					range = &ctx->range[CTX_RANGE_ID(offset)];
					range->blocks[CTX_BLOCK_ID(offset)] = NULL;
				}
				for (int k = 1; k <= block->nbo; k++) {
					pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
				}
				free(block);
			}
		}
		free(ctx->range[i].blocks);
	}
	r600_free_resource_range(ctx, &ctx->ps_resources, ctx->num_ps_resources);
	r600_free_resource_range(ctx, &ctx->vs_resources, ctx->num_vs_resources);
	r600_free_resource_range(ctx, &ctx->fs_resources, ctx->num_fs_resources);
	free(ctx->blocks);

	ctx->ws->cs_destroy(ctx->cs);
}
static void r600_add_resource_block(struct r600_context *ctx, struct r600_range *range, int num_blocks, int *index)
{
	int c = *index;

	for (int j = 0; j < num_blocks; j++) {
		if (!range->blocks[j])
			continue;

		ctx->blocks[c++] = range->blocks[j];
	}
	*index = c;
}
int r600_setup_block_table(struct r600_context *ctx)
{
	/* setup block table */
	int c = 0;

	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;

	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;

			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}

	r600_add_resource_block(ctx, &ctx->ps_resources, ctx->num_ps_resources, &c);
	r600_add_resource_block(ctx, &ctx->vs_resources, ctx->num_vs_resources, &c);
	r600_add_resource_block(ctx, &ctx->fs_resources, ctx->num_fs_resources, &c);
	return 0;
}
int r600_context_init(struct r600_context *ctx)
{
	int r;

	LIST_INITHEAD(&ctx->active_query_list);

	/* init dirty list */
	LIST_INITHEAD(&ctx->dirty);
	LIST_INITHEAD(&ctx->resource_dirty);
	LIST_INITHEAD(&ctx->enable_list);

	ctx->range = calloc(NUM_RANGES, sizeof(struct r600_range));
	if (!ctx->range) {
		r = -ENOMEM;
		goto error;
	}

	/* add blocks */
	r = r600_context_add_block(ctx, r600_config_reg_list,
				   Elements(r600_config_reg_list), PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
	if (r)
		goto error;
	r = r600_context_add_block(ctx, r600_context_reg_list,
				   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
	if (r)
		goto error;
	r = r600_context_add_block(ctx, r600_ctl_const_list,
				   Elements(r600_ctl_const_list), PKT3_SET_CTL_CONST, R600_CTL_CONST_OFFSET);
	if (r)
		goto error;

	/* PS SAMPLER BORDER */
	for (int j = 0, offset = 0; j < 18; j++, offset += 0x10) {
		r = r600_state_sampler_border_init(ctx, offset);
		if (r)
			goto error;
	}

	/* VS SAMPLER BORDER */
	for (int j = 0, offset = 0x200; j < 18; j++, offset += 0x10) {
		r = r600_state_sampler_border_init(ctx, offset);
		if (r)
			goto error;
	}
	/* PS SAMPLER */
	for (int j = 0, offset = 0; j < 18; j++, offset += 0xC) {
		r = r600_state_sampler_init(ctx, offset);
		if (r)
			goto error;
	}
	/* VS SAMPLER */
	for (int j = 0, offset = 0xD8; j < 18; j++, offset += 0xC) {
		r = r600_state_sampler_init(ctx, offset);
		if (r)
			goto error;
	}

	ctx->num_ps_resources = 160;
	ctx->num_vs_resources = 160;
	ctx->num_fs_resources = 16;
	r = r600_resource_range_init(ctx, &ctx->ps_resources, 0, 160, 0x1c);
	if (r)
		goto error;
	r = r600_resource_range_init(ctx, &ctx->vs_resources, 0x1180, 160, 0x1c);
	if (r)
		goto error;
	r = r600_resource_range_init(ctx, &ctx->fs_resources, 0x2300, 16, 0x1c);
	if (r)
		goto error;

	/* PS loop const */
	r600_loop_const_init(ctx, 0);
	/* VS loop const */
	r600_loop_const_init(ctx, 32);

	r = r600_setup_block_table(ctx);
	if (r)
		goto error;

	ctx->cs = ctx->ws->cs_create(ctx->ws);

	r600_init_cs(ctx);
	return 0;
error:
	r600_context_fini(ctx);
	return r;
}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	struct r600_atom *state;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->cs->cdw;

	if (count_draw_in) {
		/* The number of dwords all the dirty states would take. */
		LIST_FOR_EACH_ENTRY(state, &ctx->dirty_states, head) {
			num_dw += state->num_dw;
		}

		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper-bound of how much a draw command would take. */
		num_dw += R600_MAX_DRAW_CS_DWORDS;
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	num_dw += ctx->num_cs_dw_streamout_end;

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += 7; /* one SURFACE_SYNC and CACHE_FLUSH_AND_INV (r6xx-only) */

	/* Save 16 dwords for the fence mechanism. */
	num_dw += 16;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		r600_flush(&ctx->context, NULL, RADEON_FLUSH_ASYNC);
	}
}
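/* All of the terms above are worst-case upper bounds, so calling this with
 * count_draw_in = TRUE before building a draw guarantees that the draw plus
 * every end-of-CS packet (query suspend, streamout end, framebuffer flush,
 * fence) fits without another flush landing in between. */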
void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			r600_context_ps_partial_flush(ctx);
		}
	}
}
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;

	for (int i = 0; i < state->nregs; i++) {
		unsigned id, reloc_id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (block->pm4_bo_index[id]) {
			/* find relocation */
			reloc_id = block->pm4_bo_index[id];
			pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, &reg->bo->b.b.b);
			block->reloc[reloc_id].bo_usage = reg->bo_usage;
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}
static void r600_context_dirty_resource_block(struct r600_context *ctx,
					      struct r600_block *block,
					      int dirty, int index)
{
	block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->resource_dirty);
	}
}
void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block)
{
	int dirty;
	int num_regs = ctx->chip_class >= EVERGREEN ? 8 : 7;
	boolean is_vertex;

	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_RESOURCE_DIRTY);
		pipe_resource_reference((struct pipe_resource **)&block->reloc[1].bo, NULL);
		pipe_resource_reference((struct pipe_resource **)&block->reloc[2].bo, NULL);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}

	is_vertex = ((state->val[num_regs-1] & 0xc0000000) == 0xc0000000);
	dirty = block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY;

	if (memcmp(block->reg, state->val, num_regs*4)) {
		memcpy(block->reg, state->val, num_regs * 4);
		dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
	}

	/* if no BOs on block, force dirty */
	if (!block->reloc[1].bo || !block->reloc[2].bo)
		dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;

	if (!dirty) {
		if (is_vertex) {
			if (block->reloc[1].bo->buf != state->bo[0]->buf)
				dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		} else {
			if ((block->reloc[1].bo->buf != state->bo[0]->buf) ||
			    (block->reloc[2].bo->buf != state->bo[1]->buf))
				dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		}
	}

	if (dirty) {
		if (is_vertex) {
			/* VERTEX RESOURCE: we pretend there are two BOs to
			 * relocate so VERTEX & TEXTURE resources share a
			 * single code path */
			pipe_resource_reference((struct pipe_resource **)&block->reloc[1].bo, &state->bo[0]->b.b.b);
			block->reloc[1].bo_usage = state->bo_usage[0];
			pipe_resource_reference((struct pipe_resource **)&block->reloc[2].bo, NULL);
		} else {
			/* TEXTURE RESOURCE */
			pipe_resource_reference((struct pipe_resource **)&block->reloc[1].bo, &state->bo[0]->b.b.b);
			block->reloc[1].bo_usage = state->bo_usage[0];
			pipe_resource_reference((struct pipe_resource **)&block->reloc[2].bo, &state->bo[1]->b.b.b);
			block->reloc[2].bo_usage = state->bo_usage[1];
		}

		if (is_vertex)
			block->status |= R600_BLOCK_STATUS_RESOURCE_VERTEX;
		else
			block->status &= ~R600_BLOCK_STATUS_RESOURCE_VERTEX;

		r600_context_dirty_resource_block(ctx, block, dirty, num_regs - 1);
	}
}
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->ps_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->vs_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->fs_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}
static inline void r600_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
	struct r600_range *range;
	struct r600_block *block;
	int i;
	int dirty;

	range = &ctx->range[CTX_RANGE_ID(offset)];
	block = range->blocks[CTX_BLOCK_ID(offset)];
	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}
	dirty = block->status & R600_BLOCK_STATUS_DIRTY;
	for (i = 0; i < 3; i++) {
		if (block->reg[i] != state->regs[i].value) {
			block->reg[i] = state->regs[i].value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
	}

	if (dirty)
		r600_context_dirty_block(ctx, block, dirty, 2);
}
static inline void r600_context_pipe_state_set_sampler_border(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
	struct r600_range *range;
	struct r600_block *block;
	int i;
	int dirty;

	range = &ctx->range[CTX_RANGE_ID(offset)];
	block = range->blocks[CTX_BLOCK_ID(offset)];
	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}
	if (state->nregs <= 3) {
		return;
	}
	dirty = block->status & R600_BLOCK_STATUS_DIRTY;
	for (i = 0; i < 4; i++) {
		if (block->reg[i] != state->regs[i + 3].value) {
			block->reg[i] = state->regs[i + 3].value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
	}

	/* We have to flush the shaders before we change the border color
	 * registers, or previous draw commands that haven't completed yet
	 * will end up using the new border color. */
	if (dirty & R600_BLOCK_STATUS_DIRTY)
		r600_context_ps_partial_flush(ctx);
	if (dirty)
		r600_context_dirty_block(ctx, block, dirty, 3);
}
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
	unsigned offset;

	offset = 0x0003C000 + id * 0xc;
	r600_context_pipe_state_set_sampler(ctx, state, offset);
	offset = 0x0000A400 + id * 0x10;
	r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
	unsigned offset;

	offset = 0x0003C0D8 + id * 0xc;
	r600_context_pipe_state_set_sampler(ctx, state, offset);
	offset = 0x0000A600 + id * 0x10;
	r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}
struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
{
	struct r600_range *range;
	struct r600_block *block;
	unsigned id;

	range = &ctx->range[CTX_RANGE_ID(offset)];
	block = range->blocks[CTX_BLOCK_ID(offset)];
	offset -= block->start_offset;
	id = block->pm4_bo_index[offset >> 2];
	if (block->reloc[id].bo) {
		return block->reloc[id].bo;
	}
	return NULL;
}
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		ctx->flags |= R600_CONTEXT_CHECK_EVENT_FLUSH;

		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
						r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;
			}
		}
		ctx->flags &= ~R600_CONTEXT_CHECK_EVENT_FLUSH;
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);
	cs->cdw += cp_dwords;

	if (optional) {
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}
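/* The 'optional' path above shrinks the SET_* packet instead of skipping
 * it: the pm4 shadow is copied wholesale, then the header's count field is
 * rewritten in place (PKT_COUNT_C clears it, PKT_COUNT_S() inserts
 * nreg_dirty), so only the leading run of dirty registers is replayed. */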
void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int cp_dwords = block->pm4_ndwords;
	int nbo = block->nbo;

	ctx->flags |= R600_CONTEXT_CHECK_EVENT_FLUSH;

	if (block->status & R600_BLOCK_STATUS_RESOURCE_VERTEX) {
		nbo = 1;
		cp_dwords -= 2; /* don't copy the second NOP */
	}

	for (int j = 0; j < nbo; j++) {
		if (block->pm4_bo_index[j]) {
			/* find relocation */
			struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
			block->pm4[reloc->bo_pm4_index] =
				r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
		}
	}
	ctx->flags &= ~R600_CONTEXT_CHECK_EVENT_FLUSH;

	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);
	cs->cdw += cp_dwords;

	block->status ^= R600_BLOCK_STATUS_RESOURCE_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}
void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	unsigned ndwords = 7;
	uint32_t *pm4;

	if (draw->indices) {
		ndwords = 11;
	}
	if (ctx->num_cs_dw_queries_suspend) {
		if (ctx->family >= CHIP_RV770)
			ndwords += 3;
		ndwords += 3;
	}

	/* when increasing ndwords, bump the max limit too */
	assert(ndwords <= R600_MAX_DRAW_CS_DWORDS);

	/* queries need some special values
	 * (this is non-zero if any query is active) */
	if (ctx->num_cs_dw_queries_suspend) {
		if (ctx->family >= CHIP_RV770) {
			pm4 = &cs->buf[cs->cdw];
			pm4[0] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
			pm4[1] = (R_028D0C_DB_RENDER_CONTROL - R600_CONTEXT_REG_OFFSET) >> 2;
			pm4[2] = draw->db_render_control | S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
			cs->cdw += 3;
			ndwords -= 3;
		}
		pm4 = &cs->buf[cs->cdw];
		pm4[0] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		pm4[1] = (R_028D10_DB_RENDER_OVERRIDE - R600_CONTEXT_REG_OFFSET) >> 2;
		pm4[2] = draw->db_render_override | S_028D10_NOOP_CULL_DISABLE(1);
		cs->cdw += 3;
		ndwords -= 3;
	}

	/* draw packet */
	pm4 = &cs->buf[cs->cdw];
	pm4[0] = PKT3(PKT3_INDEX_TYPE, 0, ctx->predicate_drawing);
	pm4[1] = draw->vgt_index_type;
	pm4[2] = PKT3(PKT3_NUM_INSTANCES, 0, ctx->predicate_drawing);
	pm4[3] = draw->vgt_num_instances;
	if (draw->indices) {
		pm4[4] = PKT3(PKT3_DRAW_INDEX, 3, ctx->predicate_drawing);
		pm4[5] = draw->indices_bo_offset;
		pm4[6] = 0;
		pm4[7] = draw->vgt_num_indices;
		pm4[8] = draw->vgt_draw_initiator;
		pm4[9] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
		pm4[10] = r600_context_bo_reloc(ctx, draw->indices, RADEON_USAGE_READ);
	} else {
		pm4[4] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, ctx->predicate_drawing);
		pm4[5] = draw->vgt_num_indices;
		pm4[6] = draw->vgt_draw_initiator;
	}
	cs->cdw += ndwords;
}
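/* Sketch of the indexed draw emitted above (11 dwords): INDEX_TYPE and
 * NUM_INSTANCES, then DRAW_INDEX with the index-buffer offset, index count
 * and draw initiator, and a trailing NOP carrying the index-buffer
 * relocation. The non-indexed path uses DRAW_INDEX_AUTO and needs only 7
 * dwords, which is why ndwords starts at 7. */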
void r600_inval_shader_cache(struct r600_context *ctx)
{
	ctx->atom_surface_sync.flush_flags |= S_0085F0_SH_ACTION_ENA(1);
	r600_atom_dirty(ctx, &ctx->atom_surface_sync.atom);
}

void r600_inval_texture_cache(struct r600_context *ctx)
{
	ctx->atom_surface_sync.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
	r600_atom_dirty(ctx, &ctx->atom_surface_sync.atom);
}

void r600_inval_vertex_cache(struct r600_context *ctx)
{
	if (ctx->family == CHIP_RV610 ||
	    ctx->family == CHIP_RV620 ||
	    ctx->family == CHIP_RS780 ||
	    ctx->family == CHIP_RS880 ||
	    ctx->family == CHIP_RV710 ||
	    ctx->family == CHIP_CEDAR ||
	    ctx->family == CHIP_PALM ||
	    ctx->family == CHIP_SUMO ||
	    ctx->family == CHIP_SUMO2 ||
	    ctx->family == CHIP_CAICOS ||
	    ctx->family == CHIP_CAYMAN) {
		/* Some GPUs don't have the vertex cache and must use the texture cache instead. */
		ctx->atom_surface_sync.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
	} else {
		ctx->atom_surface_sync.flush_flags |= S_0085F0_VC_ACTION_ENA(1);
	}
	r600_atom_dirty(ctx, &ctx->atom_surface_sync.atom);
}
void r600_flush_framebuffer(struct r600_context *ctx, bool flush_now)
{
	if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
		return;

	ctx->atom_surface_sync.flush_flags |=
		r600_get_cb_flush_flags(ctx) |
		(ctx->framebuffer.zsbuf ? S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1) : 0);

	if (flush_now) {
		r600_emit_atom(ctx, &ctx->atom_surface_sync.atom);
	} else {
		r600_atom_dirty(ctx, &ctx->atom_surface_sync.atom);
	}

	/* Also add a complete cache flush to work around broken flushing on R6xx. */
	if (ctx->chip_class == R600) {
		if (flush_now) {
			r600_emit_atom(ctx, &ctx->atom_r6xx_flush_and_inv);
		} else {
			r600_atom_dirty(ctx, &ctx->atom_r6xx_flush_and_inv);
		}
	}

	ctx->flags &= ~R600_CONTEXT_DST_CACHES_DIRTY;
}
void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_block *enable_block = NULL;
	bool queries_suspended = false;
	bool streamout_suspended = false;

	if (cs->cdw == ctx->init_dwords)
		return;

	/* suspend queries */
	if (ctx->num_cs_dw_queries_suspend) {
		r600_context_queries_suspend(ctx);
		queries_suspended = true;
	}

	if (ctx->num_cs_dw_streamout_end) {
		r600_context_streamout_end(ctx);
		streamout_suspended = true;
	}

	r600_flush_framebuffer(ctx, true);

	/* partial flush is needed to avoid lockups on some chips with user fences */
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

	/* Flush the CS. */
	ctx->ws->cs_flush(ctx->cs, flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	r600_init_cs(ctx);

	if (streamout_suspended) {
		ctx->streamout_start = TRUE;
		ctx->streamout_append_bitmask = ~0;
	}

	/* resume queries */
	if (queries_suspended) {
		r600_context_queries_resume(ctx);
	}

	/* set all valid group as dirty so they get re-emitted on
	 * next draw command
	 */
	LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
		if (!(enable_block->flags & BLOCK_FLAG_RESOURCE)) {
			if (!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
				LIST_ADDTAIL(&enable_block->list, &ctx->dirty);
				enable_block->status |= R600_BLOCK_STATUS_DIRTY;
			}
		} else {
			if (!(enable_block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) {
				LIST_ADDTAIL(&enable_block->list, &ctx->resource_dirty);
				enable_block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
			}
		}
		ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
		enable_block->nreg_dirty = enable_block->nreg;
	}
}
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;       /* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value;                   /* DATA_LO */
	cs->buf[cs->cdw++] = 0;                       /* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, fence_bo, RADEON_USAGE_WRITE);
}
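/* How the fence above works: the PS partial flush drains in-flight work,
 * then EVENT_WRITE_EOP with DATA_SEL = 1 (the '1 << 29') makes the GPU
 * write the 32-bit 'value' to the fence BO at 'va' once the event
 * completes, so the CPU can poll that location to learn the CS reached
 * this point. */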
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
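/* Illustrative example (made-up numbers): a ZPASS_DONE pair written as
 * start = 0x8000000000000010 and end = 0x8000000000000025 has both status
 * bits set, so with test_status_bit = true the function returns
 * 0x25 - 0x10 = 21 samples; if either top bit were clear, that backend
 * never wrote its result and 0 is returned instead. */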
static boolean r600_query_result(struct r600_context *ctx, struct r600_query *query, boolean wait)
{
	unsigned results_base = query->results_start;
	char *map;

	map = ctx->ws->buffer_map(query->buffer->buf, ctx->cs,
				  PIPE_TRANSFER_READ |
				  (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base = (results_base + 16) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != query->results_end) {
			query->result.b = query->result.b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base = (results_base + 16) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != query->results_end) {
			query->result.u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != query->results_end) {
			query->result.so.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			query->result.so.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != query->results_end) {
			query->result.b = query->result.b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
		}
		break;
	default:
		assert(0);
	}

	query->results_start = query->results_end;
	ctx->ws->buffer_unmap(query->buffer->buf);
	return TRUE;
}
void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	unsigned new_results_end, i;
	uint32_t *results;
	uint64_t va;

	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	new_results_end = (query->results_end + query->result_size) % query->buffer->b.b.b.width0;

	/* collect current results if query buffer is full */
	if (new_results_end == query->results_start) {
		r600_query_result(ctx, query, TRUE);
	}

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		if (results) {
			results = (uint32_t*)((char*)results + query->results_end);
			memset(results, 0, query->result_size);

			/* Set top bits for unused backends */
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			ctx->ws->buffer_unmap(query->buffer->buf);
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		results = (uint32_t*)((char*)results + query->results_end);
		memset(results, 0, query->result_size);
		ctx->ws->buffer_unmap(query->buffer->buf);
		break;
	default:
		assert(0);
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer);
	va += query->results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = query->results_end;
		cs->buf[cs->cdw++] = 0;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer, RADEON_USAGE_WRITE);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw;
}
void r600_query_end(struct r600_context *ctx, struct r600_query *query)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        uint64_t va;

        va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer);
        /* emit end query */
        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                va += query->results_end + 8;
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
                cs->buf[cs->cdw++] = query->results_end + query->result_size/2;
                cs->buf[cs->cdw++] = 0;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                va += query->results_end + query->result_size/2;
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF); /* DATA_SEL=3: write 64-bit timestamp */
                cs->buf[cs->cdw++] = 0;
                cs->buf[cs->cdw++] = 0;
                break;
        default:
                assert(0);
        }

        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer, RADEON_USAGE_WRITE);

        query->results_end = (query->results_end + query->result_size) % query->buffer->b.b.b.width0;
        ctx->num_cs_dw_queries_suspend -= query->num_cs_dw;
}
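/* Emit SET_PREDICATION packets referencing every result block the query
 * has written so far, so that following draws can be skipped based on
 * the outcome. PREDICATION_OP_CLEAR drops any previously set predicate. */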
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
                            int flag_wait)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        uint64_t va;

        if (operation == PREDICATION_OP_CLEAR) {
                r600_need_cs_space(ctx, 3, FALSE);

                cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
                cs->buf[cs->cdw++] = 0;
                cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
        } else {
                unsigned results_base = query->results_start;
                unsigned count;
                uint32_t op;

                /* find count of the query data blocks */
                count = (query->buffer->b.b.b.width0 + query->results_end - query->results_start) %
                        query->buffer->b.b.b.width0;
                count /= query->result_size;

                r600_need_cs_space(ctx, 5 * count, TRUE);

                op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
                     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);
                va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer);

                /* emit predicate packets for all data blocks */
                while (results_base != query->results_end) {
                        cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
                        cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
                        cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer,
                                                                   RADEON_USAGE_READ);
                        results_base = (results_base + query->result_size) %
                                       query->buffer->b.b.b.width0;

                        /* set CONTINUE bit for all packets except the first */
                        op |= PREDICATION_CONTINUE;
                }
        }
}
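/* Allocate a query object and its results buffer. result_size is the
 * footprint of one begin/end result pair; num_cs_dw is the command
 * stream space needed to suspend and resume the query. */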
struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned query_type)
{
        struct r600_query *query;
        unsigned buffer_size = 4096;

        query = CALLOC_STRUCT(r600_query);
        if (query == NULL)
                return NULL;

        query->type = query_type;

        switch (query_type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                query->result_size = 16 * ctx->max_db;
                query->num_cs_dw = 6;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                query->result_size = 16;
                query->num_cs_dw = 8;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
                query->result_size = 32;
                query->num_cs_dw = 6;
                break;
        default:
                assert(0);
                FREE(query);
                return NULL;
        }

        /* adjust buffer size to simplify offsets wrapping math */
        buffer_size -= buffer_size % query->result_size;

        /* Queries are normally read by the CPU after
         * being written by the gpu, hence staging is probably a good
         * usage pattern. */
        query->buffer = (struct r600_resource*)
                pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, buffer_size);
        if (!query->buffer) {
                FREE(query);
                return NULL;
        }

        return query;
}
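/* Release the results buffer and the query object itself. */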
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
        pipe_resource_reference((struct pipe_resource**)&query->buffer, NULL);
        FREE(query);
}
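/* Fetch the accumulated result of a query, converting it to the form
 * expected for the given query type. Returns FALSE if wait is not set
 * and the result is not ready yet. */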
boolean r600_context_query_result(struct r600_context *ctx,
                                  struct r600_query *query,
                                  boolean wait, void *vresult)
{
        boolean *result_b = (boolean*)vresult;
        uint64_t *result_u64 = (uint64_t*)vresult;
        struct pipe_query_data_so_statistics *result_so =
                (struct pipe_query_data_so_statistics*)vresult;

        if (!r600_query_result(ctx, query, wait))
                return FALSE;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                *result_u64 = query->result.u64;
                break;
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                *result_b = query->result.b;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                /* raw ticks -> nanoseconds, assuming clock_crystal_freq is in kHz */
                *result_u64 = (1000000 * query->result.u64) /
                              ctx->screen->info.r600_clock_crystal_freq;
                break;
        case PIPE_QUERY_SO_STATISTICS:
                *result_so = query->result.so;
                break;
        default:
                assert(0);
        }

        return TRUE;
}
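/* Pause all active queries, typically so the command stream can be
 * flushed; r600_context_queries_resume restarts them afterwards. */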
void r600_context_queries_suspend(struct r600_context *ctx)
{
        struct r600_query *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
                r600_query_end(ctx, query);
        }
        assert(ctx->num_cs_dw_queries_suspend == 0);
}
void r600_context_queries_resume(struct r600_context *ctx)
{
        struct r600_query *query;

        assert(ctx->num_cs_dw_queries_suspend == 0);

        LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
                r600_query_begin(ctx, query);
        }
}
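/* Flush pending streamout writes: trigger a VGT streamout flush event,
 * then wait until CP_STRMOUT_CNTL reports the offset update as done. */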
static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;

        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_008490_CP_STRMOUT_CNTL - R600_CONFIG_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = 0;

        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

        cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
        cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
        cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2;  /* register */
        cs->buf[cs->cdw++] = 0;
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
        cs->buf[cs->cdw++] = 4; /* poll interval */
}
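/* Enable or disable streamout; buffer_enable_bit selects which of the
 * four streamout buffers are written. */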
static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
        struct radeon_winsys_cs *cs = ctx->cs;

        if (buffer_enable_bit) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(1);

                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028B20_VGT_STRMOUT_BUFFER_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = buffer_enable_bit;
        } else {
                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(0);
        }
}
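/* Start streamout: program size, stride and base address of each bound
 * target and initialize its buffer offset, either appending after the
 * previously filled size or starting from the given buffer offset. */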
void r600_context_streamout_begin(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_so_target **t = ctx->so_targets;
        unsigned *stride_in_dw = ctx->vs_so_stride_in_dw;
        unsigned buffer_en, i, update_flags = 0;
        uint64_t va;

        buffer_en = (ctx->num_so_targets >= 1 && t[0] ? 1 : 0) |
                    (ctx->num_so_targets >= 2 && t[1] ? 2 : 0) |
                    (ctx->num_so_targets >= 3 && t[2] ? 4 : 0) |
                    (ctx->num_so_targets >= 4 && t[3] ? 8 : 0);

        ctx->num_cs_dw_streamout_end =
                12 + /* flush_vgt_streamout */
                util_bitcount(buffer_en) * 8 + /* STRMOUT_BUFFER_UPDATE */
                3; /* set_streamout_enable(0) */

        r600_need_cs_space(ctx,
                           12 + /* flush_vgt_streamout */
                           6 + /* set_streamout_enable */
                           util_bitcount(buffer_en & ctx->streamout_append_bitmask) * 8 +
                           util_bitcount(buffer_en & ~ctx->streamout_append_bitmask) * 6 +
                           (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770 ? 2 : 0) + /* SURFACE_BASE_UPDATE */
                           ctx->num_cs_dw_streamout_end, TRUE);

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
                evergreen_set_streamout_enable(ctx, buffer_en);
        } else {
                r600_flush_vgt_streamout(ctx);
                r600_set_streamout_enable(ctx, buffer_en);
        }

        for (i = 0; i < ctx->num_so_targets; i++) {
                if (t[i]) {
                        t[i]->stride_in_dw = stride_in_dw[i];

                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->b.buffer);

                        update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

                        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 3, 0);
                        cs->buf[cs->cdw++] = (R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 +
                                              16*i - R600_CONTEXT_REG_OFFSET) >> 2;
                        cs->buf[cs->cdw++] = (t[i]->b.buffer_offset +
                                              t[i]->b.buffer_size) >> 2; /* BUFFER_SIZE (in DW) */
                        cs->buf[cs->cdw++] = stride_in_dw[i];            /* VTX_STRIDE (in DW) */
                        cs->buf[cs->cdw++] = va >> 8;                    /* BUFFER_BASE */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
                                                      RADEON_USAGE_WRITE);

                        if (ctx->streamout_append_bitmask & (1 << i)) {
                                va = r600_resource_va(&ctx->screen->screen,
                                                      (void*)t[i]->filled_size);
                                /* Append: fetch the initial offset from filled_size. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
                                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] =
                                        r600_context_bo_reloc(ctx, t[i]->filled_size,
                                                              RADEON_USAGE_READ);
                        } else {
                                /* Start from the beginning. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
                                cs->buf[cs->cdw++] = 0; /* unused */
                        }
                }
        }

        if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
                cs->buf[cs->cdw++] = update_flags;
        }
}
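/* Stop streamout: store the final filled size of each target back into
 * its filled_size buffer and flush the destination caches. */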
void r600_context_streamout_end(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_so_target **t = ctx->so_targets;
        unsigned i, flush_flags = 0;
        uint64_t va;

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
        } else {
                r600_flush_vgt_streamout(ctx);
        }

        for (i = 0; i < ctx->num_so_targets; i++) {
                if (t[i]) {
                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->filled_size);
                        cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                        cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                             STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                                             STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
                        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
                        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
                        cs->buf[cs->cdw++] = 0; /* unused */
                        cs->buf[cs->cdw++] = 0; /* unused */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, t[i]->filled_size,
                                                      RADEON_USAGE_WRITE);

                        flush_flags |= S_0085F0_SO0_DEST_BASE_ENA(1) << i;
                }
        }

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_set_streamout_enable(ctx, 0);
        } else {
                r600_set_streamout_enable(ctx, 0);
        }

        if (ctx->chip_class < R700) {
                r600_atom_dirty(ctx, &ctx->atom_r6xx_flush_and_inv);
        } else {
                ctx->atom_surface_sync.flush_flags |= flush_flags;
                r600_atom_dirty(ctx, &ctx->atom_surface_sync.atom);
        }

        ctx->num_cs_dw_streamout_end = 0;

        /* debug: dump the FILLED_SIZE written by the GPU for each target */
        for (i = 0; i < ctx->num_so_targets; i++) {
                if (t[i]) {
                        uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->buf, ctx->cs, RADEON_USAGE_READ);
                        printf("FILLED_SIZE%i: %u\n", i, *ptr);
                        ctx->ws->buffer_unmap(t[i]->filled_size->buf);
                }
        }
}
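/* Set up a "draw opaque" (draw auto): copy the streamout filled size
 * into VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE so the hardware can
 * derive the vertex count from the target's stride. */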
void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        uint64_t va = r600_resource_va(&ctx->screen->screen,
                                       (void*)t->filled_size);

        r600_need_cs_space(ctx, 14 + 21, TRUE);

        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET - R600_CONTEXT_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = 0;

        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE - R600_CONTEXT_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = t->stride_in_dw;

        cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
        cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
        cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
        cs->buf[cs->cdw++] = 0; /* unused */

        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, t->filled_size, RADEON_USAGE_READ);
}