/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_parse.h"

#include "si_shader.h"

#include "../radeon/r600_cs.h"
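
/*
 * Build the PM4 state that binds a compiled vertex shader: the shader BO
 * address (SPI_SHADER_PGM_LO/HI_VS), the SGPR/VGPR limits and streamout
 * enables (RSRC1/RSRC2), and the SPI position/parameter export formats.
 */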
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *rctx = (struct si_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;

	si_pm4_delete_state(rctx, vs, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
		switch (shader->shader.output[i].name) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;

	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));

	si_pm4_bind_state(rctx, vs, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
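
/*
 * Build the PM4 state that binds a compiled pixel shader: DB_SHADER_CONTROL
 * (Z/stencil export, kill), the SPI input enables and interpolation control,
 * the Z/color export formats, and the program address and SGPR/VGPR limits.
 */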
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *rctx = (struct si_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
	uint64_t va;

	si_pm4_delete_state(rctx, ps, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);

	db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->fb_cb0_is_integer);

	for (i = 0; i < shader->shader.ninput; i++) {
		switch (shader->shader.input[i].name) {
		case TGSI_SEMANTIC_POSITION:
			if (shader->shader.input[i].centroid) {
				/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
				 * Possible values:
				 * 0 -> Position = pixel center (default)
				 * 1 -> Position = pixel centroid
				 * 2 -> Position = iterated sample number XXX:
				 *                 What does this mean?
				 */
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
			}
			/* Fall through */
		case TGSI_SEMANTIC_FACE:
			continue;
		}
	}

	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
			db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
		if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
	}
	if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
		    shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			exports_ps |= 1;
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
	else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_R;
	else
		spi_shader_z_format = 0;
	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));

	si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);

	shader->cb0_is_integer = rctx->fb_cb0_is_integer;
	shader->sprite_coord_enable = rctx->sprite_coord_enable;
	si_pm4_bind_state(rctx, ps, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
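
/* Map gallium PIPE_PRIM_* values to VGT_PRIMITIVE_TYPE; adjacency
 * primitives are not supported here and map to ~0. */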
static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= ~0,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= ~0,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= ~0,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= ~0
	};
	unsigned result = prim_conv[pprim];
	if (result == ~0) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}
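
/* Map gallium primitive types to VGT_GS_OUT_PRIM_TYPE; everything reduces
 * to points, line strips or triangle strips. */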
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
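
/*
 * Emit the per-draw VGT/PA state: primitive type, primitive restart, index
 * offset, start instance, line stipple and the VS output/clip controls
 * derived from the current rasterizer state and vertex shader.
 */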
static bool si_update_draw_info_state(struct si_context *rctx,
				      const struct pipe_draw_info *info,
				      const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
	unsigned ls_mask = 0;

	if (rctx->b.chip_class >= CIK) {
		struct si_state_rasterizer *rs = rctx->queued.named.rasterizer;
		bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
					prim == V_008958_DI_PT_LINELOOP ||
					prim == V_008958_DI_PT_TRIFAN ||
					prim == V_008958_DI_PT_TRISTRIP_ADJ ||
					info->primitive_restart ||
					(rs ? rs->line_stipple_enable : false);
		/* If the WD switch is false, the IA switch must be false too. */
		bool ia_switch_on_eop = wd_switch_on_eop;

		si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
			       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
			       S_028AA8_PARTIAL_VS_WAVE_ON(1) |
			       S_028AA8_PRIMGROUP_SIZE(63) |
			       S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
		si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
			       ib->index_size == 4 ? 0xFC000000 : 0xFC00);

		si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
	} else {
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}

	si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
	si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
		       info->indexed ? info->index_bias : info->start);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
	si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
		       info->start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       rctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
		       S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (rctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       rctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			rctx->queued.named.rasterizer->clip_plane_enable & 0x3F));

	si_pm4_set_state(rctx, draw_info, pm4);
	return true;
}
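
/*
 * Program one SPI_PS_INPUT_CNTL register per pixel shader input, matching
 * each input against the vertex shader outputs by semantic name and index
 * and selecting flat shading, point-sprite coordinates or a default value
 * (OFFSET 0x20) when no matching output exists.
 */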
static void si_update_spi_map(struct si_context *rctx)
{
	struct si_shader *ps = &rctx->ps_shader->current->shader;
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	unsigned i, j, tmp;

	for (i = 0; i < ps->ninput; i++) {
		unsigned name = ps->input[i].name;
		unsigned param_offset = ps->input[i].param_offset;

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->ps_shader->current->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vs->noutput; j++) {
			if (name == vs->output[j].name &&
			    ps->input[i].sid == vs->output[j].sid) {
				tmp |= S_028644_OFFSET(vs->output[j].param_offset);
				break;
			}
		}

		if (j == vs->noutput) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    rctx->ps_shader->current->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(rctx, spi, pm4);
}
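
/*
 * Select/compile the current VS and PS variants, flush or decompress any
 * textures that need it, rebind the shader PM4 states and refresh the SPI
 * input mapping when either shader changed.
 */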
static void si_update_derived_state(struct si_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	unsigned vs_dirty = 0, ps_dirty = 0;

	if (!rctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (rctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(rctx, &rctx->samplers[i]);
			}
			if (rctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(rctx, &rctx->samplers[i]);
			}
		}
	}

	si_shader_select(ctx, rctx->vs_shader, &vs_dirty);

	if (!rctx->vs_shader->current->pm4) {
		si_pipe_shader_vs(ctx, rctx->vs_shader->current);
		vs_dirty = 0;
	}

	if (vs_dirty) {
		si_pm4_bind_state(rctx, vs, rctx->vs_shader->current->pm4);
	}

	si_shader_select(ctx, rctx->ps_shader, &ps_dirty);

	if (!rctx->ps_shader->current->pm4) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}
	if (rctx->ps_shader->current->cb0_is_integer != rctx->fb_cb0_is_integer) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}

	if (ps_dirty) {
		si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
	}

	if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
		/* XXX: Emitting the PS state even when only the VS changed
		 * fixes random failures with piglit glsl-max-varyings.
		 */
		rctx->emitted.named.ps = NULL;
		si_update_spi_map(rctx);
	}
}
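
/*
 * Upload one four-dword buffer resource descriptor per vertex element
 * (base address, stride, number of records and the format/dst_sel word
 * from the vertex-elements state) through the VS user-data SGPRs.
 */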
static void si_vertex_buffer_update(struct si_context *rctx)
{
	struct pipe_context *ctx = &rctx->b.b;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	bool bound[PIPE_MAX_ATTRIBS] = {};
	unsigned i, count;
	uint64_t va;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

	count = rctx->vertex_elements->count;
	assert(count <= 256 / 4);

	si_pm4_sh_data_begin(pm4);
	for (i = 0 ; i < count; i++) {
		struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;

		if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
			continue;

		vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;

		offset = 0;
		offset += vb->buffer_offset;
		offset += ve->src_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		/* Fill in T# buffer resource description */
		si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
		si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
					 S_008F04_STRIDE(vb->stride)));
		if (vb->stride)
			/* Round up by rounding down and adding 1 */
			si_pm4_sh_data_add(pm4,
					   (vb->buffer->width0 - offset -
					    util_format_get_blocksize(ve->src_format)) /
					   vb->stride + 1);
		else
			si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
		si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);

		if (!bound[ve->vertex_buffer_index]) {
			si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
			bound[ve->vertex_buffer_index] = true;
		}
	}
	si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
	si_pm4_set_state(rctx, vertex_buffers, pm4);
}
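
/*
 * Build the draw packet sequence: DB_COUNT_CONTROL while queries are
 * active, a COPY_DATA packet that loads the streamout filled size for
 * draw-auto, then INDEX_TYPE, NUM_INSTANCES and the DRAW_INDEX_2 or
 * DRAW_INDEX_AUTO packet itself.
 */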
static void si_state_draw(struct si_context *rctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);

	/* queries need some special values
	 * (this is non-zero if any query is active) */
	if (rctx->num_cs_dw_nontimer_queries_suspend) {
		if (rctx->b.chip_class >= CIK) {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(rctx->fb_log_samples) |
				       S_028004_ZPASS_ENABLE(1) |
				       S_028004_SLICE_EVEN_ENABLE(1) |
				       S_028004_SLICE_ODD_ENABLE(1));
		} else {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(rctx->fb_log_samples));
		}
	}

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = r600_resource_va(&rctx->screen->b.b,
					       &t->buf_filled_size->b.b);
		va += t->buf_filled_size_offset;

		si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
			       t->stride_in_dw);

		si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
		si_pm4_cmd_add(pm4,
			       COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			       COPY_DATA_DST_SEL(COPY_DATA_REG) |
			       COPY_DATA_WR_CONFIRM);
		si_pm4_cmd_add(pm4, va); /* src address lo */
		si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
		si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		si_pm4_cmd_add(pm4, 0); /* unused */
		si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
		si_pm4_cmd_end(pm4, true);
	}

	/* draw packet */
	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
	si_pm4_cmd_add(pm4, info->instance_count);
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				 rctx->index_buffer.index_size;
		uint64_t va;

		va = r600_resource_va(&rctx->screen->b.b, ib->buffer);
		va += ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
		si_cmd_draw_index_2(pm4, max_size, va, info->count,
				    V_0287F0_DI_SRC_SEL_DMA,
				    rctx->predicate_drawing);
	} else {
		uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
		si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
	}

	si_pm4_set_state(rctx, draw, pm4);
}
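
/*
 * Translate the accumulated R600_CONTEXT_* flush flags into SURFACE_SYNC
 * (ACQUIRE_MEM on CIK) and EVENT_WRITE packets.
 */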
void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (rctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (rctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	rctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
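
/*
 * Top-level draw entry point: validate the draw, update derived shader and
 * vertex-buffer state, set up the index buffer, emit dirty atoms and PM4
 * state, then mark the bound depth/compressed color surfaces dirty.
 */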
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *rctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	unsigned i;

	if (!info->count && (info->indexed || !info->count_from_stream_output))
		return;

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	si_update_derived_state(rctx);
	si_vertex_buffer_update(rctx);

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;

		/* Translate or upload, if needed. */
		si_translate_index_buffer(rctx, &ib, info->count);

		if (ib.user_buffer && !ib.buffer) {
			si_upload_index_buffer(rctx, &ib, info->count);
		}
	}

	if (!si_update_draw_info_state(rctx, info, &ib))
		return;

	si_state_draw(rctx, info, &ib);

	rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);

	/* Check flush flags. */
	if (rctx->b.flags) {
		rctx->atoms.cache_flush->dirty = true;
	}

	si_need_cs_space(rctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
		if (rctx->atoms.array[i]->dirty) {
			rctx->atoms.array[i]->emit(&rctx->b, rctx->atoms.array[i]);
			rctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(rctx);
	rctx->pm4_dirty_cdwords = 0;

	if (rctx->screen->trace_bo) {
		r600_trace_emit(rctx);
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->fb_compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->fb_compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
}