/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_parse.h"
#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "../radeon/r600_cs.h"
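
/* Build the hardware VS state: the PM4 commands that program the vertex
 * shader's param/position export formats, GPR counts and streamout enables. */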
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;

	si_pm4_delete_state(rctx, vs, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);

	if (pm4 == NULL)
		return;

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0; i < shader->shader.noutput; i++) {
		switch (shader->shader.output[i].name) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;

	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));

	if (rctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
			       S_00B118_CU_EN(0xffff));
		si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
			       S_00B11C_LIMIT(0));
	}

	si_pm4_bind_state(rctx, vs, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
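
/* Build the hardware PS state: barycentric controls, SPI input enables,
 * Z/color export formats, DB_SHADER_CONTROL and the shader GPR/LDS setup. */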
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
	uint64_t va;

	si_pm4_delete_state(rctx, ps, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);

	if (pm4 == NULL)
		return;

	db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->fb_cb0_is_integer);

	for (i = 0; i < shader->shader.ninput; i++) {
		switch (shader->shader.input[i].name) {
		case TGSI_SEMANTIC_POSITION:
			if (shader->shader.input[i].centroid) {
				/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
				 * Possible values:
				 * 0 -> Position = pixel center (default)
				 * 1 -> Position = pixel centroid
				 * 2 -> Position = iterated sample number XXX:
				 *                 What does this mean?
				 */
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
			}
			/* Fall through */
		case TGSI_SEMANTIC_FACE:
			continue;
		}
	}

	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
			db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
		if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
	}
	if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
		    shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			exports_ps |= 1;
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
	else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_R;
	else
		spi_shader_z_format = 0;
	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));
	if (rctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
			       S_00B01C_CU_EN(0xffff));
	}

	si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);

	shader->cb0_is_integer = rctx->fb_cb0_is_integer;
	shader->sprite_coord_enable = rctx->sprite_coord_enable;
	si_pm4_bind_state(rctx, ps, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
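
/* Map a gallium PIPE_PRIM_* value to the VGT_PRIMITIVE_TYPE encoding.
 * Adjacency primitives are not supported and map to ~0. */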
static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= ~0,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= ~0,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= ~0,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= ~0
	};
	unsigned result = prim_conv[pprim];
	if (result == ~0) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}
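
/* Map a gallium PIPE_PRIM_* value to the VGT_GS_OUT_PRIM_TYPE encoding. */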
static unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
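
/* Emit the per-draw VGT/PA state derived from the draw info: primitive type,
 * index offsets, primitive restart and the VS output/clip controls. */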
static bool si_update_draw_info_state(struct r600_context *rctx,
				      const struct pipe_draw_info *info,
				      const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = r600_conv_prim_to_gs_out(info->mode);
	unsigned ls_mask = 0;

	if (pm4 == NULL)
		return false;

	if (prim == ~0) {
		FREE(pm4);
		return false;
	}

	if (rctx->b.chip_class >= CIK) {
		struct si_state_rasterizer *rs = rctx->queued.named.rasterizer;
		bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
					prim == V_008958_DI_PT_LINELOOP ||
					prim == V_008958_DI_PT_TRIFAN ||
					prim == V_008958_DI_PT_TRISTRIP_ADJ ||
					info->primitive_restart ||
					(rs ? rs->line_stipple_enable : false);
		/* If the WD switch is false, the IA switch must be false too. */
		bool ia_switch_on_eop = wd_switch_on_eop;

		si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
			       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
			       S_028AA8_PARTIAL_VS_WAVE_ON(1) |
			       S_028AA8_PRIMGROUP_SIZE(63) |
			       S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
		si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
			       ib->index_size == 4 ? 0xFC000000 : 0xFC00);

		si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
	} else {
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}

	si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
	si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
	si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
	si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
		       info->indexed ? info->index_bias : info->start);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
	si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
		       info->start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       rctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
		       S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (rctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       rctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			rctx->queued.named.rasterizer->clip_plane_enable & 0x3F));

	si_pm4_set_state(rctx, draw_info, pm4);
	return true;
}
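
/* Program SPI_PS_INPUT_CNTL_* so each PS input is routed to the matching VS
 * output parameter, handling flat shading, point sprites and back colors. */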
static void si_update_spi_map(struct r600_context *rctx)
{
	struct si_shader *ps = &rctx->ps_shader->current->shader;
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	unsigned i, j, tmp;

	for (i = 0; i < ps->ninput; i++) {
		unsigned name = ps->input[i].name;
		unsigned param_offset = ps->input[i].param_offset;

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->ps_shader->current->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vs->noutput; j++) {
			if (name == vs->output[j].name &&
			    ps->input[i].sid == vs->output[j].sid) {
				tmp |= S_028644_OFFSET(vs->output[j].param_offset);
				break;
			}
		}

		if (j == vs->noutput) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    rctx->ps_shader->current->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(rctx, spi, pm4);
}
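
/* Select and (re)compile the current VS/PS variants, flush dirty textures and
 * rebind the shader states if anything changed. */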
static void si_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	unsigned vs_dirty = 0, ps_dirty = 0;

	if (!rctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (rctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(rctx, &rctx->samplers[i]);
			}
			if (rctx->samplers[i].compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, &rctx->samplers[i]);
			}
		}
	}

	si_shader_select(ctx, rctx->vs_shader, &vs_dirty);

	if (!rctx->vs_shader->current->pm4) {
		si_pipe_shader_vs(ctx, rctx->vs_shader->current);
		vs_dirty = 0;
	}

	if (vs_dirty) {
		si_pm4_bind_state(rctx, vs, rctx->vs_shader->current->pm4);
	}

	si_shader_select(ctx, rctx->ps_shader, &ps_dirty);

	if (!rctx->ps_shader->current->pm4) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}
	if (rctx->ps_shader->current->cb0_is_integer != rctx->fb_cb0_is_integer) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}

	if (ps_dirty) {
		si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
	}

	if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
		/* XXX: Emitting the PS state even when only the VS changed
		 * fixes random failures with piglit glsl-max-varyings.
		 */
		rctx->emitted.named.ps = NULL;
		si_update_spi_map(rctx);
	}
}
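
/* Build the T# buffer resource descriptors for the bound vertex buffers and
 * expose them to the vertex shader through its user-data SGPRs. */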
static void si_vertex_buffer_update(struct r600_context *rctx)
{
	struct pipe_context *ctx = &rctx->b.b;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	bool bound[PIPE_MAX_ATTRIBS] = {};
	unsigned i, count;
	uint64_t va;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

	count = rctx->vertex_elements->count;
	assert(count <= 256 / 4);

	si_pm4_sh_data_begin(pm4);
	for (i = 0; i < count; i++) {
		struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;

		if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
			continue;

		vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;
		if (rbuffer == NULL)
			continue;

		offset = 0;
		offset += vb->buffer_offset;
		offset += ve->src_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		/* Fill in T# buffer resource description */
		si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
		si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
					 S_008F04_STRIDE(vb->stride)));
		if (vb->stride)
			/* Round up by rounding down and adding 1 */
			si_pm4_sh_data_add(pm4,
					   (vb->buffer->width0 - offset -
					    util_format_get_blocksize(ve->src_format)) /
					   vb->stride + 1);
		else
			si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
		si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);

		if (!bound[ve->vertex_buffer_index]) {
			si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
			bound[ve->vertex_buffer_index] = true;
		}
	}
	si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
	si_pm4_set_state(rctx, vertex_buffers, pm4);
}
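
/* Emit the draw packets themselves: query state, stream-output draw setup,
 * index type, instance count and the indexed/auto draw command. */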
static void si_state_draw(struct r600_context *rctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);

	if (pm4 == NULL)
		return;

	/* queries need some special values
	 * (this is non-zero if any query is active) */
	if (rctx->num_cs_dw_nontimer_queries_suspend) {
		if (rctx->b.chip_class >= CIK) {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(rctx->fb_log_samples) |
				       S_028004_ZPASS_ENABLE(1) |
				       S_028004_SLICE_EVEN_ENABLE(1) |
				       S_028004_SLICE_ODD_ENABLE(1));
		} else {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(rctx->fb_log_samples));
		}
	}

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = r600_resource_va(&rctx->screen->b.b,
					       &t->buf_filled_size->b.b);
		va += t->buf_filled_size_offset;

		si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
			       t->stride_in_dw);

		si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
		si_pm4_cmd_add(pm4,
			       COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			       COPY_DATA_DST_SEL(COPY_DATA_REG) |
			       COPY_DATA_WR_CONFIRM);
		si_pm4_cmd_add(pm4, va);	 /* src address lo */
		si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
		si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		si_pm4_cmd_add(pm4, 0); /* unused */
		si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
		si_pm4_cmd_end(pm4, true);
	}

	/* draw packet */
	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (R600_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (R600_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
	si_pm4_cmd_add(pm4, info->instance_count);
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				    rctx->index_buffer.index_size;
		uint64_t va;
		va = r600_resource_va(&rctx->screen->b.b, ib->buffer);
		va += ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
		si_cmd_draw_index_2(pm4, max_size, va, info->count,
				    V_0287F0_DI_SRC_SEL_DMA,
				    rctx->predicate_drawing);
	} else {
		uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
		si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
	}

	si_pm4_set_state(rctx, draw, pm4);
	si_update_db_draw_state(rctx, (struct r600_surface*)rctx->framebuffer.zsbuf);
}
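
/* Emit SURFACE_SYNC / ACQUIRE_MEM and event packets for the cache flush flags
 * accumulated in rctx->flags, then clear the flags. */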
void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (rctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (rctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (rctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	rctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 11 }; /* number of CS dwords */
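
/* Top-level draw entry point: update derived state, set up the index buffer,
 * build the draw state and emit all dirty atoms and PM4 states. */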
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_index_buffer ib = {};
	uint32_t i;

	if (!info->count && (info->indexed || !info->count_from_stream_output))
		return;

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	si_update_derived_state(rctx);
	si_vertex_buffer_update(rctx);

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info->count);

		if (ib.user_buffer && !ib.buffer) {
			r600_upload_index_buffer(rctx, &ib, info->count);
		}
	}

	if (!si_update_draw_info_state(rctx, info, &ib))
		return;

	si_state_draw(rctx, info, &ib);

	rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);

	/* Check flush flags. */
	if (rctx->b.flags)
		rctx->atoms.cache_flush->dirty = true;

	si_need_cs_space(rctx, 0, TRUE);

	for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
		if (rctx->atoms.array[i]->dirty) {
			rctx->atoms.array[i]->emit(&rctx->b, rctx->atoms.array[i]);
			rctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(rctx);
	rctx->pm4_dirty_cdwords = 0;

	if (rctx->screen->trace_bo) {
		r600_trace_emit(rctx);
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->fb_compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->fb_compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
}