/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "util/format/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	assert(!cb->buf);
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}
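/* Atoms are r600's unit of dirty-state tracking: each pairs an emit callback
 * with a worst-case dword count, so the driver can size command-buffer space
 * and re-emit only the state that actually changed. */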
void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
}
void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!(flags & ~PIPE_BARRIER_UPDATE))
		return;

	if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
		rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

	if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
		     PIPE_BARRIER_SHADER_BUFFER |
		     PIPE_BARRIER_TEXTURE |
		     PIPE_BARRIER_IMAGE |
		     PIPE_BARRIER_STREAMOUT_BUFFER |
		     PIPE_BARRIER_GLOBAL_BUFFER)) {
		rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE |
				 R600_CONTEXT_INV_TEX_CACHE;
	}

	if (flags & (PIPE_BARRIER_FRAMEBUFFER |
		     PIPE_BARRIER_IMAGE))
		rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV;

	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
}
static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
	rctx->framebuffer.do_update_surf_dirtiness = true;
}
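/* Translate a gallium PIPE_PRIM_* value into the hardware VGT DI primitive
 * type. The table is indexed directly by the gallium enum. */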
static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}
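/* For VGT_GS_OUT_PRIM_TYPE only the output primitive class matters, so every
 * triangle-like input collapses to TRISTRIP and every line-like input to
 * LINESTRIP in the table below. */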
unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/* common state between evergreen and r600 */
static void r600_bind_blend_state_internal(struct r600_context *rctx,
		struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	if (rctx->framebuffer.dual_src_blend != blend->dual_src_blend) {
		rctx->framebuffer.dual_src_blend = blend->dual_src_blend;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}
static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}
static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}
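/* Note: after an indirect draw SQ_VTX_BASE_VTX_LOC no longer holds 0, so the
 * last_draw_was_indirect flag below triggers a one-shot reset of that
 * register on the next VGT state emit. */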
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}
static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}
static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state *)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}
static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}
static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* work around some issue when not writing to zbuffer
			 * we are having lockup on evergreen so do not enable
			 * hyperz when not writing zbuffer
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}
static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale ||
	     rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_viewport_set_rast_deps(&rctx->b, rs->scissor_enable, rs->clip_halfz);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}
static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}
static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		list_delinit(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}
void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
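/* Sampler-state binding tracks three bitmasks per shader stage:
 * enabled_mask (slots with a state bound), dirty_mask (slots that must be
 * re-emitted) and has_bordercolor_mask (slots needing the larger
 * border-color register emit, see the dword counts above). */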
static void r600_bind_sampler_states(struct pipe_context *pipe,
				     enum pipe_shader_type shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets 1-bit for states with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	if (!states) {
		disable_mask = ~0u;
		count = 0;
	}

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}
static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}
static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state*)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}
static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	FREE(dsa);
}
static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	r600_resource_reference(&shader->buffer, NULL);
	FREE(shader);
}
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
					       util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}
static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if ((input[i].buffer.resource != vb[i].buffer.resource) ||
			    (vb[i].stride != input[i].stride) ||
			    (vb[i].buffer_offset != input[i].buffer_offset) ||
			    (vb[i].is_user_buffer != input[i].is_user_buffer)) {
				if (input[i].buffer.resource) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer.resource);
				} else {
					pipe_resource_reference(&vb[i].buffer.resource, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer.resource, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
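/* Sampler-view binding mirrors the sampler-state bookkeeping above, and in
 * addition tracks which bound textures are depth-compressed or
 * CMASK-compressed so they can be decompressed before being sampled. */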
static void r600_set_sampler_views(struct pipe_context *pipe,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	if (!views) {
		disable_mask = ~0u;
		count = 0;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->db_compatible) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-arrays textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}
static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}
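/* HW atomic counters live in one flat range shared by all shader stages;
 * this returns the first index available to the given stage by summing the
 * counter counts of the stages placed before it. */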
static int r600_get_hw_atomic_count(const struct pipe_context *ctx,
				    enum pipe_shader_type shader)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	int value = 0;

	switch (shader) {
	default:
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_COMPUTE:
		value = 0;
		break;
	case PIPE_SHADER_VERTEX:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_GEOMETRY:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_TESS_EVAL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0);
		break;
	case PIPE_SHADER_TESS_CTRL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0) +
			rctx->tes_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	}
	return value;
}
static void r600_update_compressed_colortex_mask_images(struct r600_image_state *images)
{
	uint32_t mask = images->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = images->views[i].base.resource;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				images->compressed_colortex_mask |= 1 << i;
			} else {
				images->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}
/* Compute the key for the hw shader variant */
static inline void r600_shader_selector_key(const struct pipe_context *ctx,
		const struct r600_pipe_shader_selector *sel,
		union r600_shader_key *key)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX: {
		key->vs.as_ls = (rctx->tes_shader != NULL);
		if (!key->vs.as_ls)
			key->vs.as_es = (rctx->gs_shader != NULL);

		if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
			key->vs.as_gs_a = true;
			key->vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
		}
		key->vs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_VERTEX);
		break;
	}
	case PIPE_SHADER_GEOMETRY:
		key->gs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_GEOMETRY);
		key->gs.tri_strip_adj_fix = rctx->gs_tri_strip_adj_fix;
		break;
	case PIPE_SHADER_FRAGMENT: {
		if (rctx->ps_shader->info.images_declared)
			key->ps.image_size_const_offset = util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask);
		key->ps.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_FRAGMENT);
		key->ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key->ps.alpha_to_one = rctx->alpha_to_one &&
				       rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				       !rctx->framebuffer.cb0_is_integer;
		key->ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend)
			key->ps.nr_cbufs = 2;
		break;
	}
	case PIPE_SHADER_TESS_EVAL:
		key->tes.as_es = (rctx->gs_shader != NULL);
		key->tes.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_EVAL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		key->tcs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_CTRL);
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(0);
	}
}
/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if current variant was changed */
int r600_shader_select(struct pipe_context *ctx,
		       struct r600_pipe_shader_selector* sel,
		       bool *dirty)
{
	union r600_shader_key key;
	struct r600_pipe_shader * shader = NULL;
	int r;

	r600_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			r600_shader_selector_key(ctx, sel, &key);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}
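/* A shader selector wraps the TGSI source plus a linked list of compiled
 * variants; the hw shader actually used is picked per-draw by
 * r600_shader_select() above. */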
struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
								  const struct tgsi_token *tokens,
								  unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(tokens);
	tgsi_scan_shader(tokens, &sel->info);
	return sel;
}
static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	int i;
	struct r600_pipe_shader_selector *sel = r600_create_shader_state_tokens(ctx, state->tokens, pipe_shader_type);

	sel->so = state->stream_output;

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	return sel;
}
static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}
static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}
static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}
static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state || rctx->vs_shader == state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (rctx->vs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}
static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->gs_shader)
		return;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->gs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}
static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}
static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->tes_shader)
		return;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->tes_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}
void r600_delete_shader_selector(struct pipe_context *ctx,
				 struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}
static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
static void r600_set_constant_buffer(struct pipe_context *ctx,
				     enum pipe_shader_type shader, uint index,
				     const struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(ctx->stream_uploader, 0, size, 256,
				      tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(ctx->stream_uploader, 0,
				      input->buffer_size, 256, ptr,
				      &cb->buffer_offset, &cb->buffer);
		}
		/* account it in gtt */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}
static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context *)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}
void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	int start, end;

	start = compute_only ? PIPE_SHADER_COMPUTE : 0;
	end = compute_only ? PIPE_SHADER_TYPES : PIPE_SHADER_COMPUTE;

	for (sh = start; sh < end; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty &&
		    !info->tcs_default_levels_dirty &&
		    !info->cs_block_grid_size_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}
		else if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}
		else if (info->cs_block_grid_size_dirty) {
			assert(sh == PIPE_SHADER_COMPUTE);
			if (!size) {
				ptr = rctx->cs_block_grid_sizes;
				size = R600_CS_BLOCK_GRID_SIZE;
			} else {
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			}
			info->cs_block_grid_size_dirty = false;
		}
		else if (info->tcs_default_levels_dirty) {
			/*
			 * We'd only really need this for default tcs shader.
			 */
			assert(sh == PIPE_SHADER_TESS_CTRL);
			if (!size) {
				ptr = rctx->tess_state;
				size = R600_TCS_DEFAULT_LEVELS_SIZE;
			} else {
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
			}
			info->tcs_default_levels_dirty = false;
		}

		if (info->texture_const_dirty) {
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_COMPUTE)
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			if (sh == PIPE_SHADER_TESS_CTRL)
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}
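/* The driver-private constant buffer reserves R600_UCP_SIZE bytes at the
 * start (clip planes, sample positions or block sizes, depending on the
 * stage); per-resource constants are appended after that, which is why the
 * returned base offset is always R600_UCP_SIZE. */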
static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   unsigned array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}
/*
 * On r600/700 hw we don't have vertex fetch swizzle, and though TBO
 * doesn't require full swizzles it does need masking and setting alpha
 * to one, so we set up a set of 5 constants with the masks + alpha value,
 * then in the shader we AND the 4 components with 0xffffffff or 0 and
 * OR the alpha with the value given here.
 * We use a 6th constant to store the txq buffer size, and the
 * 7th slot for the number of cube layers in a cube map array.
 */
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.u.buf.size /
						util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}
/* On evergreen we store one value:
 * 1. number of cube layers in a cube map array.
 */
void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	struct r600_image_state *images = NULL;
	int bits, sview_bits, img_bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;

	if (shader_type == PIPE_SHADER_FRAGMENT) {
		images = &rctx->fragment_images;
	} else if (shader_type == PIPE_SHADER_COMPUTE) {
		images = &rctx->compute_images;
	}

	if (!samplers->views.dirty_buffer_constants &&
	    !(images && images->dirty_buffer_constants))
		return;

	if (images)
		images->dirty_buffer_constants = FALSE;
	samplers->views.dirty_buffer_constants = FALSE;

	bits = sview_bits = util_last_bit(samplers->views.enabled_mask);
	if (images)
		bits += util_last_bit(images->enabled_mask);
	img_bits = bits;

	array_size = bits * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < sview_bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i;
			constants[offset] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
	if (images) {
		for (i = sview_bits; i < img_bits; i++) {
			int idx = i - sview_bits;
			if (images->enabled_mask & (1 << idx)) {
				uint32_t offset = (base_offset / 4) + i;
				constants[offset] = images->views[idx].base.resource->array_size / 6;
			}
		}
	}
}
/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
	assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);

	memset(rctx->sample_positions, 0, 4 * 4 * 16);
	for (unsigned i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
		rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
	}

	rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
}
static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
	}
	r600_mark_atom_dirty(rctx, &state->atom);
}
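/* Enabling geometry shading requires ESGS/GSVS ring buffers; they are
 * allocated lazily on first use below and exposed to the relevant stages
 * through the R600_GS_RING_CONST_BUFFER slot. */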
static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			size = 0x4000000;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			if (rctx->tes_shader) {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			} else {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			}
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
					R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}
static void r600_update_clip_state(struct r600_context *rctx,
				   struct r600_pipe_shader *current)
{
	if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
	    current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
	    current->shader.cull_dist_write != rctx->clip_misc_state.cull_dist_write ||
	    current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
	    current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
		rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
		rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
		rctx->clip_misc_state.cull_dist_write = current->shader.cull_dist_write;
		rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
		rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}
}
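/* Build a pass-through TCS with ureg for the fixed-function tessellation
 * case: it copies the default outer/inner tess levels from the buffer-info
 * constant buffer straight to the TESSOUTER/TESSINNER outputs. */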
static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!rctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 1, R600_BUFFER_INFO_CONST_BUFFER);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    R600_BUFFER_INFO_CONST_BUFFER);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    R600_BUFFER_INFO_CONST_BUFFER);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	rctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}
void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only)
{
	unsigned i;
	unsigned counter;

	counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
	if (counter != rctx->b.last_compressed_colortex_counter) {
		rctx->b.last_compressed_colortex_counter = counter;

		if (compute_only) {
			r600_update_compressed_colortex_mask(&rctx->samplers[PIPE_SHADER_COMPUTE].views);
		} else {
			for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
				r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
			}
		}
		if (!compute_only)
			r600_update_compressed_colortex_mask_images(&rctx->fragment_images);
		r600_update_compressed_colortex_mask_images(&rctx->compute_images);
	}

	/* Decompress textures if needed. */
	for (i = 0; i < PIPE_SHADER_TYPES; i++) {
		struct r600_samplerview_state *views = &rctx->samplers[i].views;

		if (compute_only)
			if (i != PIPE_SHADER_COMPUTE)
				continue;
		if (views->compressed_depthtex_mask) {
			r600_decompress_depth_textures(rctx, views);
		}
		if (views->compressed_colortex_mask) {
			r600_decompress_color_textures(rctx, views);
		}
	}

	{
		struct r600_image_state *istate;

		if (!compute_only) {
			istate = &rctx->fragment_images;
			if (istate->compressed_depthtex_mask)
				r600_decompress_depth_images(rctx, istate);
			if (istate->compressed_colortex_mask)
				r600_decompress_color_images(rctx, istate);
		}

		istate = &rctx->compute_images;
		if (istate->compressed_depthtex_mask)
			r600_decompress_depth_images(rctx, istate);
		if (istate->compressed_colortex_mask)
			r600_decompress_color_images(rctx, istate);
	}
}
/* update MEM_SCRATCH buffers if needed */
void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
	struct r600_pipe_shader *shader, struct r600_scratch_buffer *scratch,
	unsigned ring_base_reg, unsigned item_size_reg, unsigned ring_size_reg)
{
	unsigned num_ses = rctx->screen->b.info.max_se;
	unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
	unsigned nthreads = 128;

	unsigned itemsize = shader->scratch_space_needed * 4;
	unsigned size = align(itemsize * nthreads * num_pipes * num_ses * 4, 256);

	if (scratch->dirty ||
		unlikely(shader->scratch_space_needed != scratch->item_size ||
		size > scratch->size)) {
		struct radeon_cmdbuf *cs = rctx->b.gfx.cs;

		scratch->dirty = false;

		if (size > scratch->size) {
			// Release prior one if any
			if (scratch->buffer) {
				pipe_resource_reference((struct pipe_resource**)&scratch->buffer, NULL);
			}

			scratch->buffer = (struct r600_resource *)pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
				PIPE_USAGE_DEFAULT, size);
			if (scratch->buffer) {
				scratch->size = size;
			}
		}

		scratch->item_size = shader->scratch_space_needed;

		radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));

		// multi-SE chips need programming per SE
		for (unsigned se = 0; se < num_ses; se++) {
			struct r600_resource *rbuffer = scratch->buffer;
			unsigned size_per_se = size / num_ses;

			// Direct to particular SE
			if (num_ses > 1) {
				radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
					S_0802C_INSTANCE_INDEX(0) |
					S_0802C_SE_INDEX(se) |
					S_0802C_INSTANCE_BROADCAST_WRITES(1) |
					S_0802C_SE_BROADCAST_WRITES(0));
			}

			radeon_set_config_reg(cs, ring_base_reg, (rbuffer->gpu_address + size_per_se * se) >> 8);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
				RADEON_USAGE_READWRITE,
				RADEON_PRIO_SCRATCH_BUFFER));
			radeon_set_context_reg(cs, item_size_reg, itemsize);
			radeon_set_config_reg(cs, ring_size_reg, size_per_se >> 8);
		}

		// Restore broadcast mode
		if (num_ses > 1) {
			radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
				S_0802C_INSTANCE_INDEX(0) |
				S_0802C_SE_INDEX(0) |
				S_0802C_INSTANCE_BROADCAST_WRITES(1) |
				S_0802C_SE_BROADCAST_WRITES(1));
		}

		radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
	}
}
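/* Per-stage scratch (register spill) rings: walk the hw stages and program
 * the ring base / item size / ring size registers for any shader that
 * declared scratch space. */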
void r600_setup_scratch_buffers(struct r600_context *rctx) {
	static const struct {
		unsigned ring_base;
		unsigned item_size;
		unsigned ring_size;
	} regs[R600_NUM_HW_STAGES] = {
		[R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
		[R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
		[R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
		[R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE }
	};

	for (unsigned i = 0; i < R600_NUM_HW_STAGES; i++) {
		struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;

		if (stage && unlikely(stage->scratch_space_needed)) {
			r600_setup_scratch_area_for_shader(rctx, stage,
				&rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
		}
	}
}
#define SELECT_SHADER_OR_FAIL(x) do {					\
		r600_shader_select(ctx, rctx->x##_shader, &x##_dirty);	\
		if (unlikely(!rctx->x##_shader->current))		\
			return false;					\
	} while(0)

#define UPDATE_SHADER(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
	} while(0)

#define UPDATE_SHADER_CLIP(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			clip_so_current = rctx->sw##_shader->current;	\
		}							\
	} while(0)

#define UPDATE_SHADER_GS(hw, hw2, sw) do {				\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \
			clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \
		}							\
	} while(0)

#define SET_NULL_SHADER(hw) do {					\
		if (rctx->hw_shader_stages[(hw)].shader)		\
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \
	} while(0)
static bool r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context *ctx = (struct pipe_context *)rctx;
	bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
	bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false;
	bool blend_disable;
	bool need_buf_const;
	struct r600_pipe_shader *clip_so_current = NULL;

	if (!rctx->blitter->running)
		r600_update_compressed_resource_state(rctx, false);

	SELECT_SHADER_OR_FAIL(ps);

	r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);

	update_gs_block_state(rctx, rctx->gs_shader != NULL);

	if (rctx->gs_shader)
		SELECT_SHADER_OR_FAIL(gs);

	if (rctx->tcs_shader) {
		SELECT_SHADER_OR_FAIL(tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, tcs);
	} else if (rctx->tes_shader) {
		if (!rctx->fixed_func_tcs_shader) {
			r600_generate_fixed_func_tcs(rctx);
			if (!rctx->fixed_func_tcs_shader)
				return false;
		}
		SELECT_SHADER_OR_FAIL(fixed_func_tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs);
	} else
		SET_NULL_SHADER(EG_HW_STAGE_HS);

	if (rctx->tes_shader) {
		SELECT_SHADER_OR_FAIL(tes);
	}

	SELECT_SHADER_OR_FAIL(vs);

	if (rctx->gs_shader) {
		if (!rctx->shader_stages.geom_enable) {
			rctx->shader_stages.geom_enable = true;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		/* gs_shader provides GS and VS (copy shader) */
		UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs);

		if (rctx->tes_shader) {
			/* VS goes to LS, TES goes to ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			/* vs_shader is used as ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, vs);
			SET_NULL_SHADER(EG_HW_STAGE_LS);
		}
	} else {
		if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
			SET_NULL_SHADER(R600_HW_STAGE_GS);
			SET_NULL_SHADER(R600_HW_STAGE_ES);
			rctx->shader_stages.geom_enable = false;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		if (rctx->tes_shader) {
			/* if TES is loaded and no geometry, TES runs on hw VS, VS runs on hw LS */
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			SET_NULL_SHADER(EG_HW_STAGE_LS);
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs);
		}
	}
	/*
	 * XXX: I believe there's some fatal flaw in the dirty state logic when
	 * enabling/disabling tes.
	 * VS/ES share all buffer/resource/sampler slots. If TES is enabled,
	 * it will therefore overwrite the VS slots. If it now gets disabled,
	 * the VS needs to rebind all buffer/resource/sampler slots - not only
	 * has TES overwritten the corresponding slots, but when the VS was
	 * operating as LS the things with corresponding dirty bits got bound
	 * to LS slots and won't reflect what is dirty as VS stage even if the
	 * TES didn't overwrite it. The story for re-enabled TES is similar.
	 * In any case, we're not allowed to submit any TES state when
	 * TES is disabled (the state tracker may not do this but this looks
	 * like an optimization to me, not something which can be relied on).
	 */

	/* Update clip misc state. */
	if (clip_so_current) {
		r600_update_clip_state(rctx, clip_so_current);
		rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask;
	}
	if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
		     rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
		     rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {

		if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs ||
		    rctx->cb_misc_state.ps_color_export_mask != rctx->ps_shader->current->ps_color_export_mask) {
			rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
			rctx->cb_misc_state.ps_color_export_mask = rctx->ps_shader->current->ps_color_export_mask;
			r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
		}

		if (rctx->b.chip_class <= R700) {
			bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

			if (rctx->cb_misc_state.multiwrite != multiwrite) {
				rctx->cb_misc_state.multiwrite = multiwrite;
				r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
			}
		}

		if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
			     ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
			      (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {

			if (rctx->b.chip_class >= EVERGREEN)
				evergreen_update_ps_state(ctx, rctx->ps_shader->current);
			else
				r600_update_ps_state(ctx, rctx->ps_shader->current);
		}

		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}
	UPDATE_SHADER(R600_HW_STAGE_PS, ps);
	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_update_db_shader_control(rctx);
	} else {
		r600_update_db_shader_control(rctx);
	}

	/* For each shader stage that needs to spill, set up buffer for MEM_SCRATCH */
	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_setup_scratch_buffers(rctx);
	} else {
		r600_setup_scratch_buffers(rctx);
	}

	/* on R600 we stuff masks + txq info into one constant buffer */
	/* on evergreen we only need a txq info one */
	if (rctx->ps_shader) {
		need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
		}
	}

	if (rctx->vs_shader) {
		need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
		}
	}

	if (rctx->gs_shader) {
		need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
		}
	}

	if (rctx->tes_shader) {
		assert(rctx->b.chip_class >= EVERGREEN);
		need_buf_const = rctx->tes_shader->current->shader.uses_tex_buffers ||
				 rctx->tes_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_EVAL);
		}
		if (rctx->tcs_shader) {
			need_buf_const = rctx->tcs_shader->current->shader.uses_tex_buffers ||
					 rctx->tcs_shader->current->shader.has_txq_cube_array_z_comp;
			if (need_buf_const) {
				eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_CTRL);
			}
		}
	}
	r600_update_driver_const_buffers(rctx, false);

	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		if (!r600_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	if (rctx->b.chip_class == EVERGREEN) {
		if (!evergreen_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	blend_disable = (rctx->dual_src_blend &&
			 rctx->ps_shader->current->nr_ps_color_outputs < 2);

	if (blend_disable != rctx->force_blend_disable) {
		rctx->force_blend_disable = blend_disable;
		r600_bind_blend_state_internal(rctx,
					       rctx->blend_state.cso,
					       blend_disable);
	}

	return true;
}
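/*
 * Why force_blend_disable exists: dual-source blending consumes two color
 * exports from the fragment shader. If the bound shader exports fewer than
 * two, the blend equation would read undefined data, so blending is forced
 * off until a compatible shader/blend-state pair is bound again. A sketch of
 * the decision in isolation (illustrative only, not driver code):
 */
#if 0
static bool must_force_blend_disable(bool dual_src_blend,
				     unsigned nr_ps_color_outputs)
{
	/* dual-source blend needs at least two PS color outputs */
	return dual_src_blend && nr_ps_color_outputs < 2;
}
#endif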
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_clip_misc_state *state = &rctx->clip_misc_state;

	radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
			       state->pa_cl_clip_cntl |
			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
			       S_028810_CLIP_DISABLE(state->clip_disable));
	radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       state->pa_cl_vs_out_cntl |
			       (state->clip_plane_enable & state->clip_dist_write) |
			       (state->cull_dist_write << 8));
	/* reuse needs to be set off if we write oViewport */
	if (rctx->b.chip_class >= EVERGREEN)
		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
				       S_028AB4_REUSE_OFF(state->vs_out_viewport));
}
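/*
 * Note on the PA_CL_CLIP_CNTL value emitted above (interpretation, not from
 * the original comments): the low six bits are the fixed-function user clip
 * plane enables. They are only set when the shader does not export clip
 * distances itself (clip_dist_write == 0); when it does, the plane enables
 * are instead ANDed into PA_CL_VS_OUT_CNTL's clip-distance enable mask so
 * the two mechanisms never combine.
 */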
/* rast_prim is the primitive type after GS. */
static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	enum pipe_prim_type rast_prim = rctx->current_rast_prim;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == rctx->last_rast_prim)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
			       S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) |
			       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
	rctx->last_rast_prim = rast_prim;
}
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
	bool has_user_indices = info->has_user_indices;
	uint64_t mask;
	unsigned num_patches, dirty_tex_counter, index_offset = 0;
	unsigned index_size = info->index_size;
	int index_bias;
	struct r600_shader_atomic combined_atomics[8];
	uint8_t atomic_used_mask = 0;

	if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
		return;
	}

	if (unlikely(!rctx->vs_shader)) {
		assert(0);
		return;
	}
	if (unlikely(!rctx->ps_shader &&
		     (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
		assert(0);
		return;
	}

	/* make sure that the gfx ring is the only one active */
	if (radeon_emitted(rctx->b.dma.cs, 0)) {
		rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
	}

	if (rctx->cmd_buf_is_compute) {
		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->cmd_buf_is_compute = false;
	}
	/* Re-emit the framebuffer state if needed. */
	dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter);
	if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
		rctx->b.last_dirty_tex_counter = dirty_tex_counter;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
		rctx->framebuffer.do_update_surf_dirtiness = true;
	}

	if (rctx->gs_shader) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!rctx->tes_shader &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;
		if (gs_tri_strip_adj_fix != rctx->gs_tri_strip_adj_fix)
			rctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
	}
	if (!r600_update_derived_state(rctx)) {
		/* pointless to render because the current rendering command
		 * can't be completed
		 */
		return;
	}

	rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim
		: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
		: info->mode;

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask);
	}

	if (index_size) {
		index_offset += info->start * index_size;

		/* Translate 8-bit indices to 16-bit. */
		if (unlikely(index_size == 1)) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset;
			void *ptr;
			unsigned start, count;

			if (likely(!info->indirect)) {
				start = 0;
				count = info->count;
			} else {
				/* Have to get start/count from indirect buffer, slow path ahead... */
				struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
				unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
										 PIPE_TRANSFER_READ);
				if (data) {
					data += info->indirect->offset / sizeof(unsigned);
					start = data[2] * index_size;
					count = data[0];
				} else {
					start = 0;
					count = 0;
				}
			}

			u_upload_alloc(ctx->stream_uploader, start, count * 2,
				       256, &out_offset, &out_buffer, &ptr);
			if (unlikely(!ptr))
				return;

			util_shorten_ubyte_elts_to_userptr(
				&rctx->b.b, info, 0, 0, index_offset, count, ptr);

			indexbuf = out_buffer;
			index_offset = out_offset;
			index_size = 2;
			has_user_indices = false;
		}
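		/*
		 * The widening above in isolation (illustrative; the driver
		 * uses util_shorten_ubyte_elts_to_userptr): the GPU has no
		 * 8-bit index type, so each byte index is zero-extended to
		 * 16 bits while being copied into the upload buffer, which
		 * is why count * 2 bytes were allocated:
		 *
		 *	for (n = 0; n < count; n++)
		 *		dst16[n] = src8[n];
		 */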
		/* Upload the index buffer.
		 * The upload is skipped for small index counts on little-endian machines
		 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
		 * Indirect draws never use immediate indices.
		 * Note: Instanced rendering in combination with immediate indices hangs. */
		if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
					 info->instance_count > 1 ||
					 info->count*index_size > 20)) {
			indexbuf = NULL;
			u_upload_data(ctx->stream_uploader, 0,
				      info->count * index_size, 256,
				      info->index.user, &index_offset, &indexbuf);
			has_user_indices = false;
		}
		index_bias = info->index_bias;
	} else {
		index_bias = info->start;
	}
	/* Set the index offset and primitive restart. */
	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index ||
	    rctx->vgt_state.vgt_indx_offset != index_bias ||
	    (rctx->vgt_state.last_draw_was_indirect && !info->indirect)) {
		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart;
		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index;
		rctx->vgt_state.vgt_indx_offset = index_bias;
		r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
	}

	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
	if (rctx->b.chip_class == R600) {
		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (rctx->b.chip_class >= EVERGREEN)
		evergreen_setup_tess_constants(rctx, info, &num_patches);

	/* Emit states. */
	r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask));
	r600_flush_emit(rctx);

	mask = rctx->dirty_atoms;
	while (mask != 0) {
		r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
	}

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask);
	}
	if (rctx->b.chip_class == CAYMAN) {
		/* Copied from radeonsi. */
		unsigned primgroup_size = 128; /* recommended without a GS */
		bool ia_switch_on_eop = false;
		bool partial_vs_wave = false;

		if (rctx->gs_shader)
			primgroup_size = 64; /* recommended with a GS */

		if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
		    (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
			ia_switch_on_eop = true;
		}

		if (r600_get_strmout_en(&rctx->b))
			partial_vs_wave = true;

		radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
				       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
				       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
				       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
	}
	if (rctx->b.chip_class >= EVERGREEN) {
		uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, info,
								   num_patches);

		evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
		evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc);
	}

	/* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
	 * even though it should have no effect on those. */
	if (rctx->b.chip_class == R600 && rctx->rasterizer) {
		unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
		unsigned prim = info->mode;

		if (rctx->gs_shader) {
			prim = rctx->gs_shader->gs_output_prim;
		}
		prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */

		if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
		    prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
		    info->mode == R600_PRIM_RECTANGLE_LIST) {
			su_sc_mode_cntl &= C_028814_CULL_FRONT;
		}
		radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
	}
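	/*
	 * Note: r600_conv_prim_to_gs_out() folds every primitive type into one
	 * of three output classes (point list, line strip, triangle strip),
	 * which is why the workaround above only has to test the point and
	 * line cases plus the rectangle-list special case.
	 */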
	/* Update start instance. */
	if (!info->indirect && rctx->last_start_instance != info->start_instance) {
		radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
		rctx->last_start_instance = info->start_instance;
	}

	/* Update the primitive type. */
	if (rctx->last_primitive_type != info->mode) {
		r600_emit_rasterizer_prim_state(rctx);
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
				      r600_conv_pipe_prim(info->mode));

		rctx->last_primitive_type = info->mode;
	}
	/* Draw packets. */
	if (likely(!info->indirect)) {
		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);
	} else {
		uint64_t va = r600_resource(info->indirect->buffer)->gpu_address;

		assert(rctx->b.chip_class >= EVERGREEN);

		// Invalidate so non-indirect draw calls reset this state
		rctx->vgt_state.last_draw_was_indirect = true;
		rctx->last_start_instance = -1;

		radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
							  (struct r600_resource*)info->indirect->buffer,
							  RADEON_USAGE_READ,
							  RADEON_PRIO_DRAW_INDIRECT));
	}

	if (index_size) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
		radeon_emit(cs, index_size == 4 ?
			    (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
			    (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
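		/*
		 * Worked example for the immediate path below (illustrative):
		 * 7 indices at 2 bytes each is 14 bytes of payload, and
		 * align(14, 4) / 4 = 4 dwords, so the packet carries the
		 * count dword, the source-select dword and 4 index dwords,
		 * with the final half-dword being padding.
		 */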
		if (has_user_indices) {
			unsigned size_bytes = info->count*index_size;
			unsigned size_dw = align(size_bytes, 4) / 4;
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
			radeon_emit_array(cs, info->index.user, size_dw);
		} else {
			uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;

			if (likely(!info->indirect)) {
				radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);
				radeon_emit(cs, info->count);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource*)indexbuf,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));
			} else {
				uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);

				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource*)indexbuf,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
				radeon_emit(cs, max_size);

				radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
				radeon_emit(cs, info->indirect->offset);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
			}
		}
	} else {
		if (unlikely(info->count_from_stream_output)) {
			struct r600_so_target *t = (struct r600_so_target*)info->count_from_stream_output;
			uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;

			radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
			radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
			radeon_emit(cs, va & 0xFFFFFFFFUL);     /* src address lo */
			radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
			radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
			radeon_emit(cs, 0); /* unused */

			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
								  t->buf_filled_size, RADEON_USAGE_READ,
								  RADEON_PRIO_SO_FILLED_SIZE));
		}

		if (likely(!info->indirect)) {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
		} else {
			radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
			radeon_emit(cs, info->indirect->offset);
		}
		radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
			    (info->count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
	}
	/* SMX returns CONTEXT_DONE too early workaround */
	if (rctx->b.family == CHIP_R600 ||
	    rctx->b.family == CHIP_RV610 ||
	    rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV635) {
		/* if we have a gs shader or streamout
		   we need to wait for idle after every draw */
		if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) {
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		}
	}

	/* ES ring rolling over at EOP - workaround */
	if (rctx->b.chip_class == R600) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
	}

	if (rctx->b.chip_class >= EVERGREEN)
		evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics, &atomic_used_mask);

	if (rctx->trace_buf)
		eg_trace_emit(rctx);
	if (rctx->framebuffer.do_update_surf_dirtiness) {
		/* Set the depth buffer as dirty. */
		if (rctx->framebuffer.state.zsbuf) {
			struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
			struct r600_texture *rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;

			if (rtex->surface.has_stencil)
				rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
		}
		if (rctx->framebuffer.compressed_cb_mask) {
			struct pipe_surface *surf;
			struct r600_texture *rtex;
			unsigned mask = rctx->framebuffer.compressed_cb_mask;

			do {
				unsigned i = u_bit_scan(&mask);
				surf = rctx->framebuffer.state.cbufs[i];
				rtex = (struct r600_texture*)surf->texture;

				rtex->dirty_level_mask |= 1 << surf->u.tex.level;
			} while (mask);
		}

		rctx->framebuffer.do_update_surf_dirtiness = false;
	}

	if (index_size && indexbuf != info->index.resource)
		pipe_resource_reference(&indexbuf, NULL);
	rctx->b.num_draw_calls++;
}
uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		assert(0);
		break;
	}
	return 0;
}
uint32_t r600_translate_fill(uint32_t func)
{
	switch(func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}
unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}
unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}
unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}
static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
	       (linear_filter &&
		(wrap == PIPE_TEX_WRAP_CLAMP ||
		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}
bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

	return (state->border_color.ui[0] || state->border_color.ui[1] ||
		state->border_color.ui[2] || state->border_color.ui[3]) &&
	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}
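/*
 * In other words: a border color only has to be programmed when it is both
 * non-zero (an all-zero border matches the hardware default) and actually
 * reachable, i.e. at least one wrap mode can sample the border. CLAMP and
 * MIRROR_CLAMP reach the border only with linear filtering, which is why
 * linear_filter feeds into wrap_mode_uses_border_color() above.
 */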
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;

	if (!shader)
		return;

	r600_emit_command_buffer(cs, &shader->command_buffer);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
						  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY));
}
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
				   const unsigned char *swizzle_view,
				   boolean vtx)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t tex_swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t vtx_swizzle_shift[4] = {
		3, 6, 9, 12,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};
	const uint32_t *swizzle_shift = tex_swizzle_shift;

	if (vtx)
		swizzle_shift = vtx_swizzle_shift;

	if (swizzle_view) {
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case PIPE_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* PIPE_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}
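/*
 * The compose step used above, in isolation: each view-swizzle channel
 * selects from the format swizzle, e.g. a format swizzle ZYX1 ({2,1,0,5})
 * seen through a view swizzle XXXY yields ZZZY. A minimal sketch
 * (illustrative; the driver uses util_format_compose_swizzles()):
 */
#if 0
static void compose_swizzles_sketch(const unsigned char fmt[4],
				    const unsigned char view[4],
				    unsigned char out[4])
{
	for (unsigned i = 0; i < 4; i++) {
		/* view channels 0..3 select X/Y/Z/W from fmt; the constant
		 * selectors (PIPE_SWIZZLE_0/1) pass through unchanged */
		out[i] = view[i] <= 3 ? fmt[view[i]] : view[i];
	}
}
#endif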
/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p,
				  bool do_endian_swap)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	bool is_srgb_valid = FALSE;
	const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
	const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
	const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1};
	const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5};
	const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3};

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};

	/* Need to replace the specified texture formats in case of big-endian.
	 * These formats are formats that have channels with number of bits
	 * not divisible by 8.
	 * Mesa conversion functions don't swap bits for those formats, and because
	 * we transmit this over a serial bus to the GPU (PCIe), the
	 * bit-endianness is important!!!
	 * In case we have an "opposite" format, just use that for the swizzling
	 * information. If we don't have such an "opposite" format, we need
	 * to use a fixed swizzle info instead (see below)
	 */
	if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap)
		format = PIPE_FORMAT_A4R4_UNORM;

	desc = util_format_description(format);

	/* Depth and stencil swizzling is handled separately. */
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
		/* Need to check for specific texture formats that don't have
		 * an "opposite" format we can use. For those formats, we directly
		 * specify the swizzling, which is the LE swizzling as defined in
		 * u_format.csv
		 */
		if (do_endian_swap) {
			if (format == PIPE_FORMAT_L4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE);
			else
				word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		} else {
			word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		}
	}
	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		/* Depth sampler formats. */
		case PIPE_FORMAT_Z16_UNORM:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		/* Stencil sampler formats. */
		case PIPE_FORMAT_S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}
	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
			/* fallthrough */
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fallthrough */
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
		if (rscreen->b.chip_class < EVERGREEN)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_BPTC_RGBA_UNORM:
		case PIPE_FORMAT_BPTC_SRGBA:
			result = FMT_BC7;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_BPTC_RGB_FLOAT:
			word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
			/* fallthrough */
		case PIPE_FORMAT_BPTC_RGB_UFLOAT:
			result = FMT_BC6;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}
	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}

	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}
	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}
	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}
	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;
	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

out_word4:

	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		goto out_unknown;

	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;

out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
				    bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
	 desc->channel[2].size == (z) && desc->channel[3].size == (w))

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
			return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8);
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
{
	if (R600_BIG_ENDIAN) {
		switch(colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as mesa<-->pipe formats conversion take into account
			 * the endianness
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
			return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE);

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as mesa<-->pipe formats conversion take into account
			 * the endianness
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
			return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE);

		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN16;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
		case V_0280A0_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}
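/*
 * What ENDIAN_8IN16 / ENDIAN_8IN32 mean, in effect: bytes are swapped within
 * each 16-bit or 32-bit element, so packed formats end up with little-endian
 * layout even on big-endian hosts. A sketch of the equivalent CPU-side swaps
 * (illustrative only, not driver code):
 */
#if 0
static uint16_t swap_8in16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static uint32_t swap_8in32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
	       ((v << 8) & 0x00FF0000u) | (v << 24);
}
#endif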
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(&rctx->screen->b, rbuffer);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}
	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i] &&
		    rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			uint64_t offset = view->base.u.buf.offset;
			uint64_t va = rbuffer->gpu_address + offset;

			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}

	/* SSBOs */
	struct r600_image_state *istate = &rctx->fragment_buffers;
	{
		uint32_t mask = istate->enabled_mask;
		bool found = false;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (istate->views[i].base.resource == &rbuffer->b.b) {
				found = true;
				istate->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_mark_atom_dirty(rctx, &istate->atom);
		}
	}
}
static void r600_set_active_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context*)ctx;

	/* Pipeline stat & streamout queries. */
	if (enable) {
		rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
	} else {
		rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
	}

	/* Occlusion queries. */
	if (rctx->db_misc_state.occlusion_queries_disabled != !enable) {
		rctx->db_misc_state.occlusion_queries_disabled = !enable;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0);
}
/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_tcs_state = r600_create_tcs_state;
	rctx->b.b.create_tes_state = r600_create_tes_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.bind_tcs_state = r600_bind_tcs_state;
	rctx->b.b.bind_tes_state = r600_bind_tes_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
	rctx->b.b.delete_tes_state = r600_delete_tes_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.memory_barrier = r600_memory_barrier;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.set_active_query_state = r600_set_active_query_state;

	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}