/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "../radeon/r600_cs.h"
#include "sid.h"

#include "util/u_blitter.h"
#include "util/u_format.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
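
/* State and draw packet handling for SI (Southern Islands / GCN): this file
 * builds the per-stage shader PM4 state (ES/GS/VS/PS), the draw-dependent
 * register state, and the index/draw packets themselves. */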

static void si_pipe_shader_es(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	si_pm4_delete_state(sctx, es, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(sctx);

	if (pm4 == NULL)
		return;

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);

	vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);
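
	/* The RSRC1 VGPRS/SGPRS fields below are encoded with a granularity of
	 * 4 VGPRs and 8 SGPRs, hence the (count - 1) / granularity arithmetic:
	 * e.g. 37 VGPRs are programmed as (37 - 1) / 4 = 9. */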
	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs));

	si_pm4_bind_state(sctx, es, shader->pm4);
	sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}

static void si_pipe_shader_gs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned gs_vert_itemsize = shader->shader.noutput * (16 >> 2);
	unsigned gs_max_vert_out = shader->shader.gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned cut_mode;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	si_pm4_delete_state(sctx, gs, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(sctx);

	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}
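
	/* cut_mode, chosen above as the smallest bucket covering
	 * gs_max_vert_out, tells the VGT the worst-case number of vertices
	 * the GS may emit between primitive cuts. */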
	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));
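
	/* Ring offsets and item sizes are programmed in dwords: each GS output
	 * is a vec4, i.e. 16 bytes = (16 >> 2) dwords, and the GSVS item
	 * covers all vertices a single primitive may emit. */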
	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       shader->shader.nparam * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs));

	si_pm4_bind_state(sctx, gs, shader->pm4);
	sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}

static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;

	si_pm4_delete_state(sctx, vs, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(sctx);

	if (pm4 == NULL)
		return;

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);

	vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0; i < shader->shader.noutput; i++) {
		switch (shader->shader.output[i].name) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;
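
	/* SPI_VS_OUT_CONFIG stores the param export count minus one, which is
	 * why nparams is clamped to at least 1 above. */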
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));

	si_pm4_bind_state(sctx, vs, shader->pm4);
	sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}

static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_pm4_state *pm4;
	unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
	uint64_t va;

	si_pm4_delete_state(sctx, ps, shader->pm4);
	pm4 = shader->pm4 = si_pm4_alloc_state(sctx);

	if (pm4 == NULL)
		return;

	db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);

	for (i = 0; i < shader->shader.ninput; i++) {
		switch (shader->shader.input[i].name) {
		case TGSI_SEMANTIC_POSITION:
			if (shader->shader.input[i].centroid) {
				/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
				 * 0 -> Position = pixel center (default)
				 * 1 -> Position = pixel centroid
				 * 2 -> Position = iterated sample number XXX:
				 *      What does this mean?
				 */
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
			}
			/* fall through */
		case TGSI_SEMANTIC_FACE:
			continue;
		}
	}

	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
			db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
		if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
	}
	if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	for (i = 0; i < shader->shader.noutput; i++) {
		if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
		    shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
			exports_ps |= 1;
	}
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
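
	/* Choose the Z export format from what the shader actually exports:
	 * 32_GR carries depth in R and stencil in G, 32_R carries depth only. */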
	if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
	else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
		spi_shader_z_format = V_028710_SPI_SHADER_32_R;
	else
		spi_shader_z_format = 0;
	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = r600_resource_va(ctx->screen, (void *)shader->bo);
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));

	si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);

	shader->cb0_is_integer = sctx->fb_cb0_is_integer;
	shader->sprite_coord_enable = sctx->sprite_coord_enable;
	si_pm4_bind_state(sctx, ps, shader->pm4);
	sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
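
/* Translate a gallium PIPE_PRIM_* value into a VGT DI primitive type.
 * Adjacency primitives map to ~0 here because they are not supported yet. */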
static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= ~0,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= ~0,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= ~0,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= ~0
	};
	unsigned result = prim_conv[pprim];
	if (result == ~0) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}
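
/* Map the draw's input primitive type to the primitive type the VGT sees
 * coming out of the GS stage: everything filled becomes a triangle strip,
 * everything line-like a line strip, and points stay points. */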
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}

static bool si_update_draw_info_state(struct si_context *sctx,
				      const struct pipe_draw_info *info,
				      const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
	struct si_shader *vs = &sctx->vs_shader->current->shader;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim =
		si_conv_prim_to_gs_out(sctx->gs_shader ?
				       sctx->gs_shader->current->shader.gs_output_prim :
				       info->mode);
	unsigned ls_mask = 0;

	if (pm4 == NULL)
		return false;

	if (prim == ~0) {
		si_pm4_free_state(sctx, pm4, ~0);
		return false;
	}

	if (sctx->b.chip_class >= CIK) {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
		bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
					prim == V_008958_DI_PT_LINELOOP ||
					prim == V_008958_DI_PT_TRIFAN ||
					prim == V_008958_DI_PT_TRISTRIP_ADJ ||
					info->primitive_restart ||
					(rs ? rs->line_stipple_enable : false);
		/* If the WD switch is false, the IA switch must be false too. */
		bool ia_switch_on_eop = wd_switch_on_eop;

		si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
			       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
			       S_028AA8_PARTIAL_VS_WAVE_ON(1) |
			       S_028AA8_PRIMGROUP_SIZE(63) |
			       S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
		si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
			       ib->index_size == 4 ? 0xFC000000 : 0xFC00);

		si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
	} else {
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}

	si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
	si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
		       info->indexed ? info->index_bias : info->start);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
	si_pm4_set_reg(pm4, SI_SGPR_START_INSTANCE * 4 +
		       (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
			R_00B130_SPI_SHADER_USER_DATA_VS_0),
		       info->start_instance);
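
	/* ls_mask picks the AUTO_RESET_CNTL behaviour for the line-stipple
	 * pattern below, depending on whether lines arrive as a list or as
	 * a strip. */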
	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       sctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS ||
	    info->mode == PIPE_PRIM_QUAD_STRIP ||
	    info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
		       S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (sctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       sctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));

	si_pm4_set_state(sctx, draw_info, pm4);
	return true;
}
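
/* Build SPI_PS_INPUT_CNTL_* so that each PS input is fed from the matching
 * VS output parameter, with flat shading, point-sprite and two-side color
 * overrides applied. */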
static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = &sctx->ps_shader->current->shader;
	struct si_shader *vs = &sctx->vs_shader->current->shader;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
	unsigned i, j, tmp;

	for (i = 0; i < ps->ninput; i++) {
		unsigned name = ps->input[i].name;
		unsigned param_offset = ps->input[i].param_offset;

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     sctx->ps_shader->current->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vs->noutput; j++) {
			if (name == vs->output[j].name &&
			    ps->input[i].sid == vs->output[j].sid) {
				tmp |= S_028644_OFFSET(vs->output[j].param_offset);
				break;
			}
		}

		if (j == vs->noutput) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    sctx->ps_shader->current->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned size = 128 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = si_pm4_alloc_state(sctx);

	sctx->esgs_ring.buffer =
		pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STATIC, size);
	sctx->esgs_ring.buffer_size = size;

	size = 64 * 1024 * 1024;
	sctx->gsvs_ring.buffer =
		pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STATIC, size);
	sctx->gsvs_ring.buffer_size = size;
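
	/* Both ring-size registers are programmed in units of 256 bytes,
	 * hence the / 256 below; CIK moved them to a new address range. */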
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       sctx->esgs_ring.buffer_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       sctx->gsvs_ring.buffer_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       sctx->esgs_ring.buffer_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       sctx->gsvs_ring.buffer_size / 256);
	}

	si_set_ring_buffer(&sctx->b.b, SI_SHADER_EXPORT, 0, &sctx->esgs_ring,
			   0, sctx->esgs_ring.buffer_size, true, true, 4, 64);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, 0, &sctx->esgs_ring,
			   0, sctx->esgs_ring.buffer_size, false, false, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, 0, &sctx->gsvs_ring,
			   0, sctx->gsvs_ring.buffer_size, false, false, 0, 0);
}

static void si_update_derived_state(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	unsigned vs_dirty = 0, ps_dirty = 0;

	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}
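
	/* Select the shader variant for the current state, (re)build its PM4
	 * state if it was just compiled, and bind it. With a GS, the VS runs
	 * as an ES feeding the ESGS ring while the GS copy shader takes over
	 * the hardware VS (vertex export) stage. */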
	if (sctx->gs_shader) {
		unsigned es_dirty = 0, gs_dirty = 0;

		si_shader_select(ctx, sctx->gs_shader, &gs_dirty);

		if (!sctx->gs_shader->current->pm4) {
			si_pipe_shader_gs(ctx, sctx->gs_shader->current);
			si_pipe_shader_vs(ctx,
					  sctx->gs_shader->current->gs_copy_shader);
			gs_dirty = 0;
		}

		if (gs_dirty) {
			si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
			si_pm4_bind_state(sctx, vs,
					  sctx->gs_shader->current->gs_copy_shader->pm4);
		}

		si_shader_select(ctx, sctx->vs_shader, &es_dirty);

		if (!sctx->vs_shader->current->pm4) {
			si_pipe_shader_es(ctx, sctx->vs_shader->current);
			es_dirty = 0;
		}

		if (es_dirty) {
			si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
		}

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, 1, &sctx->gsvs_ring,
				   sctx->gs_shader->current->shader.gs_max_out_vertices *
				   sctx->gs_shader->current->shader.noutput * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = si_pm4_alloc_state(sctx);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader, &vs_dirty);

		if (!sctx->vs_shader->current->pm4) {
			si_pipe_shader_vs(ctx, sctx->vs_shader->current);
			vs_dirty = 0;
		}

		if (vs_dirty) {
			si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
		}

		if (!sctx->gs_off) {
			sctx->gs_off = si_pm4_alloc_state(sctx);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader, &ps_dirty);

	if (!sctx->ps_shader->current->pm4) {
		si_pipe_shader_ps(ctx, sctx->ps_shader->current);
		ps_dirty = 0;
	}
	if (sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer) {
		si_pipe_shader_ps(ctx, sctx->ps_shader->current);
		ps_dirty = 0;
	}

	if (ps_dirty) {
		si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
	}

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
		/* XXX: Emitting the PS state even when only the VS changed
		 * fixes random failures with piglit glsl-max-varyings. */
		sctx->emitted.named.ps = NULL;
		si_update_spi_map(sctx);
	}
}

static void si_vertex_buffer_update(struct si_context *sctx)
{
	struct pipe_context *ctx = &sctx->b.b;
	struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
	bool bound[PIPE_MAX_ATTRIBS] = {};
	unsigned i, count;
	uint64_t va;

	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

	count = sctx->vertex_elements->count;
	assert(count <= 256 / 4);

	si_pm4_sh_data_begin(pm4);
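
	/* Each enabled vertex element gets a 4-dword T# (typed buffer)
	 * resource descriptor: base address, stride plus address high bits,
	 * number of records, and the format word precomputed in rsrc_word3.
	 * The table is handed to the shader through the vertex-buffer
	 * user-data SGPRs below. */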
	for (i = 0; i < count; i++) {
		struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;

		if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
			continue;

		vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;
		if (rbuffer == NULL)
			continue;

		offset = 0;
		offset += vb->buffer_offset;
		offset += ve->src_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		/* Fill in T# buffer resource description */
		si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
		si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
					 S_008F04_STRIDE(vb->stride)));
		if (vb->stride)
			/* Round up by rounding down and adding 1 */
			si_pm4_sh_data_add(pm4,
					   (vb->buffer->width0 - offset -
					    util_format_get_blocksize(ve->src_format)) /
					   vb->stride + 1);
		else
			si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
		si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);

		if (!bound[ve->vertex_buffer_index]) {
			si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
			bound[ve->vertex_buffer_index] = true;
		}
	}
	si_pm4_sh_data_end(pm4, sctx->gs_shader ?
			   R_00B330_SPI_SHADER_USER_DATA_ES_0 :
			   R_00B130_SPI_SHADER_USER_DATA_VS_0,
			   SI_SGPR_VERTEX_BUFFER);
	si_pm4_set_state(sctx, vertex_buffers, pm4);
}

static void si_state_draw(struct si_context *sctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);

	if (pm4 == NULL)
		return;

	/* queries need some special values
	 * (this is non-zero if any query is active) */
	if (sctx->b.num_occlusion_queries > 0) {
		if (sctx->b.chip_class >= CIK) {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
				       S_028004_ZPASS_ENABLE(1) |
				       S_028004_SLICE_EVEN_ENABLE(1) |
				       S_028004_SLICE_ODD_ENABLE(1));
		} else {
			si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
				       S_028004_PERFECT_ZPASS_COUNTS(1) |
				       S_028004_SAMPLE_RATE(sctx->fb_log_samples));
		}
	}
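
	/* For draws whose vertex count comes from streamout, copy the
	 * buf_filled_size value from memory into the VGT register so the
	 * hardware can derive the count itself (USE_OPAQUE in the draw
	 * initiator below). */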
	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = r600_resource_va(&sctx->screen->b.b,
					       &t->buf_filled_size->b.b);
		va += t->buf_filled_size_offset;

		si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
			       t->stride_in_dw);

		si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
		si_pm4_cmd_add(pm4,
			       COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			       COPY_DATA_DST_SEL(COPY_DATA_REG) |
			       COPY_DATA_WR_CONFIRM);
		si_pm4_cmd_add(pm4, va); /* src address lo */
		si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
		si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		si_pm4_cmd_add(pm4, 0); /* unused */
		si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
		si_pm4_cmd_end(pm4, true);
	}

	/* draw packet */
	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

	si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
	si_pm4_cmd_add(pm4, info->instance_count);
	si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				    sctx->index_buffer.index_size;
		uint64_t va;

		va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
		va += ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource*)ib->buffer, RADEON_USAGE_READ);
		si_cmd_draw_index_2(pm4, max_size, va, info->count,
				    V_0287F0_DI_SRC_SEL_DMA,
				    sctx->b.predicate_drawing);
	} else {
		uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
		si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->b.predicate_drawing);
	}

	si_pm4_set_state(sctx, draw, pm4);
}
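
/* Emit a SURFACE_SYNC (ACQUIRE_MEM on CIK) covering the whole address range
 * plus the event writes requested by the context flags, then clear the
 * flags. */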
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
			   R600_CONTEXT_PS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}

	sctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	uint32_t i;

	if (!info->count && (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->ps_shader || !sctx->vs_shader)
		return;

	si_update_derived_state(sctx);
	si_vertex_buffer_update(sctx);

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;

		/* Translate or upload, if needed. */
		if (ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset;
			void *ptr;

			u_upload_alloc(sctx->b.uploader, 0, info->count * 2,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(
					&sctx->b.b, &ib, 0, ib.offset, info->count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			ib.offset = out_offset;
			ib.index_size = 2;
		}

		if (ib.user_buffer && !ib.buffer) {
			u_upload_data(sctx->b.uploader, 0, info->count * ib.index_size,
				      ib.user_buffer, &ib.offset, &ib.buffer);
		}
	}

	if (!si_update_draw_info_state(sctx, info, &ib))
		return;

	si_state_draw(sctx, info, &ib);

	sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);

	/* Check flush flags. */
	if (sctx->b.flags)
		sctx->atoms.cache_flush->dirty = true;

	si_need_cs_space(sctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
		if (sctx->atoms.array[i]->dirty) {
			sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
			sctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(sctx);
	sctx->pm4_dirty_cdwords = 0;

	if (sctx->screen->b.trace_bo) {
		si_trace_emit(sctx);
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->fb_compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->fb_compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}