/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_parse.h"
#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "../radeon/r600_cs.h"
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;

	si_pm4_delete_state(rctx, vs, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
	if (pm4 == NULL)
		return;

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
		switch (shader->shader.output[i].name) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;

	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs));

	if (rctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
			       S_00B118_CU_EN(0xffff));
		si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
			       0); /* assumption: no VS late allocation */
	}

	si_pm4_bind_state(rctx, vs, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
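
/*
 * Note on the PGM_RSRC1/PGM_RSRC2 programming above and in
 * si_pipe_shader_ps() below: the hardware encodes register counts in
 * allocation granules, so the SGPRS field holds (blocks of 8) - 1 and the
 * VGPRS field holds (blocks of 4) - 1, which is where the (n - 1) / 8 and
 * (n - 1) / 4 expressions come from. The assert(num_sgprs <= 104) matches
 * the per-wave SGPR limit, with the last two SGPRs reserved for VCC.
 */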
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned i, exports_ps, num_cout, spi_ps_in_control, db_shader_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
	uint64_t va;

	si_pm4_delete_state(rctx, ps, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(rctx);
	if (pm4 == NULL)
		return;

	db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->fb_cb0_is_integer);

	for (i = 0; i < shader->shader.ninput; i++) {
		switch (shader->shader.input[i].name) {
		case TGSI_SEMANTIC_POSITION:
			if (shader->shader.input[i].centroid) {
				/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
				 * Possible values:
				 * 0 -> Position = pixel center (default)
				 * 1 -> Position = pixel centroid
				 * 2 -> Position = iterated sample number
				 *      XXX: What does this mean?
				 */
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
			}
			/* Fall through */
		case TGSI_SEMANTIC_FACE:
			continue;
		}
	}

	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
			db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
		if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
	}
	if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	num_cout = 0;
	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
		    shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			exports_ps |= 1;
		else if (shader->shader.output[i].name == TGSI_SEMANTIC_COLOR) {
			if (shader->shader.fs_write_all)
				num_cout = shader->shader.nr_cbufs;
			else
				num_cout++;
		}
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
	else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_R;
	else
		spi_shader_z_format = 0;
	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));
	if (rctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
			       S_00B01C_CU_EN(0xffff));
	}

	si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);

	shader->cb0_is_integer = rctx->fb_cb0_is_integer;
	shader->sprite_coord_enable = rctx->sprite_coord_enable;
	si_pm4_bind_state(rctx, ps, shader->pm4);
	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
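
/*
 * Map gallium primitive types to VGT_PRIMITIVE_TYPE values. Entries set to
 * ~0 (the adjacency primitives) are not supported here; si_conv_pipe_prim()
 * reports an error for them and callers are expected to check for the ~0
 * return value.
 */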
static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= ~0,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= ~0,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= ~0,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= ~0
	};
	unsigned result = prim_conv[pprim];

	if (result == ~0) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}
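
/*
 * Build the per-draw "draw_info" PM4 state: primitive type, index offset
 * and primitive-restart setup, the start-instance user SGPR, line-stipple
 * auto-reset, the provoking vertex for quads/polygons, and the clip/cull
 * distance enables derived from the current vertex shader.
 */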
static bool si_update_draw_info_state(struct r600_context *rctx,
				      const struct pipe_draw_info *info)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned ls_mask = 0;

	if (pm4 == NULL)
		return false;

	if (prim == ~0) {
		FREE(pm4);
		return false;
	}

	if (rctx->b.chip_class >= CIK)
		si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
	else
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
	si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
	si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
	si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
		       info->indexed ? info->index_bias : info->start);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
	si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
		       info->start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       rctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (rctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       rctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			rctx->queued.named.rasterizer->clip_plane_enable & 0x3F));

	si_pm4_set_state(rctx, draw_info, pm4);
	return true;
}
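
/*
 * Program one SPI_PS_INPUT_CNTL_n register per pixel shader input: flat
 * shading and point-sprite overrides, plus the parameter offset of the
 * matching vertex shader output. If no VS output matches, offset 0x20 loads
 * the hardware defaults into the input. Two-sided color inputs are handled
 * by a second pass that remaps COLOR to BCOLOR.
 */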
static void si_update_spi_map(struct r600_context *rctx)
{
	struct si_shader *ps = &rctx->ps_shader->current->shader;
	struct si_shader *vs = &rctx->vs_shader->current->shader;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	unsigned i, j, tmp;

	for (i = 0; i < ps->ninput; i++) {
		unsigned name = ps->input[i].name;
		unsigned param_offset = ps->input[i].param_offset;

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->ps_shader->current->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vs->noutput; j++) {
			if (name == vs->output[j].name &&
			    ps->input[i].sid == vs->output[j].sid) {
				tmp |= S_028644_OFFSET(vs->output[j].param_offset);
				break;
			}
		}

		if (j == vs->noutput) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    rctx->ps_shader->current->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(rctx, spi, pm4);
}
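
/*
 * Select (and, if missing, build) the VS and PS variants for the current
 * state, decompress any bound depth or compressed color textures, and
 * rebind the shader PM4 states when they changed.
 */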
static void si_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	unsigned vs_dirty = 0, ps_dirty = 0;

	if (!rctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (rctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(rctx, &rctx->samplers[i]);
			}
			if (rctx->samplers[i].compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, &rctx->samplers[i]);
			}
		}
	}

	si_shader_select(ctx, rctx->vs_shader, &vs_dirty);

	if (!rctx->vs_shader->current->pm4) {
		si_pipe_shader_vs(ctx, rctx->vs_shader->current);
		vs_dirty = 0;
	}

	if (vs_dirty) {
		si_pm4_bind_state(rctx, vs, rctx->vs_shader->current->pm4);
	}

	si_shader_select(ctx, rctx->ps_shader, &ps_dirty);

	if (!rctx->ps_shader->current->pm4) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}
	if (!rctx->ps_shader->current->bo) {
		if (!rctx->dummy_pixel_shader->pm4)
			si_pipe_shader_ps(ctx, rctx->dummy_pixel_shader);
		else
			si_pm4_bind_state(rctx, ps, rctx->dummy_pixel_shader->pm4);

		ps_dirty = 0;
	}
	if (rctx->ps_shader->current->cb0_is_integer != rctx->fb_cb0_is_integer) {
		si_pipe_shader_ps(ctx, rctx->ps_shader->current);
		ps_dirty = 0;
	}

	if (ps_dirty) {
		si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
	}

	if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
		/* XXX: Emitting the PS state even when only the VS changed
		 * fixes random failures with piglit glsl-max-varyings.
		 */
		rctx->emitted.named.ps = NULL;
		si_update_spi_map(rctx);
	}
}
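
/*
 * Upload one 4-dword buffer resource descriptor (T#) per vertex element:
 * dword0 = base address, dword1 = address high bits | stride,
 * dword2 = number of records, dword3 = the precomputed data-format word
 * from the vertex-elements state. The descriptors are loaded through the
 * VS user-data SGPRs (SI_SGPR_VERTEX_BUFFER).
 */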
static void si_vertex_buffer_update(struct r600_context *rctx)
{
	struct pipe_context *ctx = &rctx->b.b;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	bool bound[PIPE_MAX_ATTRIBS] = {};
	unsigned i, count;
	uint64_t va;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

	count = rctx->vertex_elements->count;
	assert(count <= 256 / 4);

	si_pm4_sh_data_begin(pm4);
	for (i = 0 ; i < count; i++) {
		struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;

		if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
			continue;

		vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;
		if (rbuffer == NULL)
			continue;

		offset = 0;
		offset += vb->buffer_offset;
		offset += ve->src_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		/* Fill in T# buffer resource description */
		si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
		si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
					 S_008F04_STRIDE(vb->stride)));
		if (vb->stride)
			/* Round up by rounding down and adding 1 */
			si_pm4_sh_data_add(pm4,
					   (vb->buffer->width0 - offset -
					    util_format_get_blocksize(ve->src_format)) /
					   vb->stride + 1);
		else
			si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
		si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);

		if (!bound[ve->vertex_buffer_index]) {
			si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
			bound[ve->vertex_buffer_index] = true;
		}
	}
	si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
	si_pm4_set_state(rctx, vertex_buffers, pm4);
}
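
/*
 * Emit the draw packets proper: INDEX_TYPE and NUM_INSTANCES first, then
 * either DRAW_INDEX_2 (indexed, with the index buffer VA and max_size) or
 * DRAW_INDEX_AUTO (non-indexed, optionally sourcing the vertex count from
 * stream output via USE_OPAQUE).
 */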
static void si_state_draw(struct r600_context *rctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(rctx);
	uint64_t va;

	if (pm4 == NULL)
		return;

	/* queries need some special values
	 * (this is non-zero if any query is active) */
	if (rctx->num_cs_dw_nontimer_queries_suspend) {
		struct si_state_dsa *dsa = rctx->queued.named.dsa;

		si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
			       S_028004_PERFECT_ZPASS_COUNTS(1) |
			       S_028004_SAMPLE_RATE(rctx->fb_log_samples));
		si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE,
			       dsa->db_render_override |
			       S_02800C_NOOP_CULL_DISABLE(1));
	}

	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (R600_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (R600_BIG_ENDIAN ?
				V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
	si_pm4_cmd_add(pm4, info->instance_count);
	si_pm4_cmd_end(pm4, rctx->predicate_drawing);

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				    rctx->index_buffer.index_size;

		va = r600_resource_va(&rctx->screen->b.b, ib->buffer);
		va += ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
		si_cmd_draw_index_2(pm4, max_size, va, info->count,
				    V_0287F0_DI_SRC_SEL_DMA,
				    rctx->predicate_drawing);
	} else {
		uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
		si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
	}
	si_pm4_set_state(rctx, draw, pm4);
}
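
/*
 * Flush/invalidate caches with a single SURFACE_SYNC packet on SI, or the
 * equivalent ACQUIRE_MEM packet on CIK. The si_atom_cache_flush budget of
 * 9 dwords below covers the worst case: the 7-dword ACQUIRE_MEM plus the
 * 2-dword FLUSH_AND_INV_CB_META event.
 */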
void si_emit_cache_flush(struct r600_common_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (rctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (rctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (rctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	rctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 9 }; /* number of CS dwords */
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_index_buffer ib = {};
	unsigned i;

	if (!info->count && (info->indexed || !info->count_from_stream_output))
		return;

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	si_update_derived_state(rctx);
	si_vertex_buffer_update(rctx);

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info->count);

		if (ib.user_buffer && !ib.buffer) {
			r600_upload_index_buffer(rctx, &ib, info->count);
		}
	}

	rctx->vs_shader_so_strides = rctx->vs_shader->current->so_strides;

	if (!si_update_draw_info_state(rctx, info))
		return;

	si_state_draw(rctx, info, &ib);

	rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);

	/* Check flush flags. */
	if (rctx->b.flags) {
		rctx->atoms.cache_flush->dirty = true;
	}
	si_need_cs_space(rctx, 0, TRUE);

	for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
		if (rctx->atoms.array[i]->dirty) {
			rctx->atoms.array[i]->emit(&rctx->b, rctx->atoms.array[i]);
			rctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(rctx);
	rctx->pm4_dirty_cdwords = 0;

	if (rctx->screen->trace_bo) {
		r600_trace_emit(rctx);
	}

	/* Enable stream out if needed. */
	if (rctx->streamout_start) {
		r600_context_streamout_begin(rctx);
		rctx->streamout_start = FALSE;
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->fb_compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->fb_compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
}