/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_format.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
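
/* Fill the pm4 state for a vertex shader compiled as an ES (export shader)
 * stage, i.e. a VS feeding a geometry shader through the ESGS ring:
 * program address, SGPR/VGPR counts and user-SGPR setup. */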
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs));
}
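
/* Fill the pm4 state for a geometry shader: GS mode and cut mode, ESGS/GSVS
 * ring item sizes and offsets, the maximum number of emitted vertices, and
 * the program address and register resources. */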
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * (16 >> 2);
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs));
}
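
/* Fill the pm4 state for a shader running as a real hardware VS stage:
 * parameter export count, position export formats, streamout enables
 * and the viewport transform setup (bypassed for window-space positions). */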
static void si_shader_vs(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	if (shader->is_gs_copy_shader)
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	else
		num_user_sgprs = SI_VS_NUM_USER_SGPR;

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0; i < info->num_outputs; i++) {
		switch (info->output_semantic_name[i]) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
}
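
/* Fill the pm4 state for a pixel shader: barycentric setup, input enables
 * (at least one must be set or the GPU hangs), Z/color export formats,
 * and the program address and register resources. */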
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * Possible values:
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));
}
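
/* Dispatch to the right pm4 builder above, based on the shader type and,
 * for vertex shaders, on whether the variant runs as ES or as a real VS. */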
void si_shader_init_pm4_state(struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}

/*
 * Drawing
 */

static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	unsigned result = prim_conv[pprim];

	if (result == V_008958_DI_PT_NONE) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}
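
/* Map a pipe primitive type to the output primitive type the rasterizer
 * consumes: points, line strips or triangle strips. */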
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
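
/* Compute IA_MULTI_VGT_PARAM: the primgroup size plus the SWITCH_ON_EOP,
 * PARTIAL_VS_WAVE and WD_SWITCH_ON_EOP settings the hardware requires for
 * certain primitive types, streamout, line stipple and Hawaii instancing. */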
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool partial_vs_wave = false;

	if (sctx->gs_shader)
		primgroup_size = 64; /* recommended with a GS */

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.streamout.streamout_enabled ||
	    sctx->b.streamout.prims_gen_query_enabled)
		partial_vs_wave = true;

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
	       S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
}
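
/* Build the per-draw info pm4 state: primitive type, IA_MULTI_VGT_PARAM,
 * primitive restart, line stipple reset, provoking vertex and clip control.
 * Returns false on allocation failure or an unsupported primitive type. */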
static bool si_update_draw_info_state(struct si_context *sctx,
				      const struct pipe_draw_info *info,
				      const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	struct si_shader *vs = si_get_vs_state(sctx);
	unsigned window_space =
	   vs->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim =
		si_conv_prim_to_gs_out(sctx->gs_shader ?
				       sctx->gs_shader->gs_output_prim :
				       info->mode);
	unsigned ls_mask = 0;
	unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);

	if (pm4 == NULL)
		return false;

	if (prim == V_008958_DI_PT_NONE) {
		FREE(pm4);
		return false;
	}

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
			       ib->index_size == 4 ? 0xFC000000 : 0xFC00);

		si_pm4_cmd_begin(pm4, PKT3_DRAW_PREAMBLE);
		si_pm4_cmd_add(pm4, prim); /* VGT_PRIMITIVE_TYPE */
		si_pm4_cmd_add(pm4, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
		si_pm4_cmd_add(pm4, 0); /* VGT_LS_HS_CONFIG */
		si_pm4_cmd_end(pm4, false);
	} else {
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
		si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
	}

	si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       sctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP ||
	    info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
		       S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (sctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       sctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			sctx->queued.named.rasterizer->clip_plane_enable & 0x3F) |
		       S_028810_CLIP_DISABLE(window_space));

	si_pm4_set_state(sctx, draw_info, pm4);
	return true;
}
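
/* Program SPI_PS_INPUT_CNTL_* for every PS input: match each input against
 * the VS outputs, handle flat shading, point sprites and two-side colors,
 * and load defaults when no matching VS output exists. */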
static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	unsigned i, j, tmp;

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR &&
		     ps->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    sctx->sprite_coord_enable & (1 << index)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (j == vsinfo->num_outputs) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 64 * 1024 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = CALLOC_STRUCT(si_pm4_state);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0);
}
static void si_update_derived_state(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;

	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}

	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);
		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);

		sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;

		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
				   sctx->gsvs_ring,
				   sctx->gs_shader->gs_max_out_vertices *
				   sctx->gs_shader->info.num_outputs * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);

		sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;

		if (!sctx->gs_off) {
			sctx->gs_off = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs))
		si_update_spi_map(sctx);

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		sctx->db_render_state.dirty = true;
	}
}
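
/* Build the pm4 state that performs the draw itself: stream-output-based
 * draws, index type, instance count, base vertex / start instance user
 * SGPRs and the final draw packet (direct, indexed or indirect). */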
static void si_state_draw(struct si_context *sctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
						  R_00B130_SPI_SHADER_USER_DATA_VS_0);
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
			       t->stride_in_dw);

		si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
		si_pm4_cmd_add(pm4,
			       COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			       COPY_DATA_DST_SEL(COPY_DATA_REG) |
			       COPY_DATA_WR_CONFIRM);
		si_pm4_cmd_add(pm4, va); /* src address lo */
		si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
		si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		si_pm4_cmd_add(pm4, 0); /* unused */
		si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ,
			      RADEON_PRIO_MIN);
		si_pm4_cmd_end(pm4, true);
	}

	/* draw packet */
	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

	if (!info->indirect) {
		si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
		si_pm4_cmd_add(pm4, info->instance_count);
		si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

		si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
			       info->indexed ? info->index_bias : info->start);
		si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_START_INSTANCE * 4,
			       info->start_instance);
	} else {
		si_pm4_add_bo(pm4, (struct r600_resource *)info->indirect,
			      RADEON_USAGE_READ, RADEON_PRIO_MIN);
	}

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				    ib->index_size;
		uint64_t va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ,
			      RADEON_PRIO_MIN);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
			si_cmd_draw_index_indirect(pm4, indirect_va, va, max_size,
						   info->indirect_offset,
						   sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
						   sh_base_reg + SI_SGPR_START_INSTANCE * 4,
						   sctx->b.predicate_drawing);
		} else {
			va += info->start * ib->index_size;
			si_cmd_draw_index_2(pm4, max_size, va, info->count,
					    V_0287F0_DI_SRC_SEL_DMA,
					    sctx->b.predicate_drawing);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
			si_cmd_draw_indirect(pm4, indirect_va, info->indirect_offset,
					     sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
					     sh_base_reg + SI_SGPR_START_INSTANCE * 4,
					     sctx->b.predicate_drawing);
		} else {
			si_cmd_draw_index_auto(pm4, info->count,
					       V_0287F0_DI_SRC_SEL_AUTO_INDEX |
					       S_0287F0_USE_OPAQUE(!!info->count_from_stream_output),
					       sctx->b.predicate_drawing);
		}
	}

	si_pm4_set_state(sctx, draw, pm4);
}
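
/* Translate the R600_CONTEXT_* flush flags accumulated in sctx->flags into
 * SURFACE_SYNC / ACQUIRE_MEM and EVENT_WRITE packets, then clear the flags. */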
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_WITH_INV_L2) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
				EVENT_WRITE_INV_L2);
	}

	if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
			   R600_CONTEXT_PS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	sctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */
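
/* Return the start/count pair of a draw; for indirect draws, read it back
 * from the mapped indirect buffer. */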
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);

		data += info->indirect_offset / sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}
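
/* The main draw entry point: validate derived state, translate or upload
 * the index buffer if needed, build the draw packets, emit all dirty atoms
 * and pm4 states, and mark the bound depth/color buffers as dirty. */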
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	uint32_t i;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->ps_shader || !sctx->vs_shader)
		return;

	si_update_derived_state(sctx);

	if (sctx->vertex_buffers_dirty) {
		si_update_vertex_buffers(sctx);
		sctx->vertex_buffers_dirty = false;
	}

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		if (ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	if (!si_update_draw_info_state(sctx, info, &ib))
		return;

	si_state_draw(sctx, info, &ib);

	sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);

	/* Check flush flags. */
	if (sctx->b.flags)
		sctx->atoms.s.cache_flush->dirty = true;

	si_need_cs_space(sctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
		if (sctx->atoms.array[i]->dirty) {
			sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
			sctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(sctx);
	sctx->pm4_dirty_cdwords = 0;

#if SI_TRACE_CS
	if (sctx->screen->b.trace_bo) {
		si_trace_emit(sctx);
	}
#endif

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if (sctx->b.family == CHIP_HAWAII &&
	    (sctx->b.streamout.streamout_enabled ||
	     sctx->b.streamout.prims_gen_query_enabled)) {
		sctx->b.flags |= R600_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}
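
/* Write the current command-stream dword count and the CS counter into the
 * trace buffer, so a GPU hang can be narrowed down to a packet. */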
void si_trace_emit(struct si_context *sctx)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va;

	va = sscreen->b.trace_bo->gpu_address;
	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
			PKT3_WRITE_DATA_WR_CONFIRM |
			PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
	radeon_emit(cs, va & 0xFFFFFFFFUL);
	radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
	radeon_emit(cs, cs->cdw);
	radeon_emit(cs, sscreen->b.cs_count);
}