/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"

#include "util/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"
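
/*
 * Common state management shared by the r600 and evergreen backends:
 * CSO bind/delete callbacks, sampler and constant buffer handling,
 * shader variant selection and the common draw_vbo path.
 */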

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
}

void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}

void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}

void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
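
/* A texture barrier invalidates the texture cache and flushes the
 * colorbuffer caches, then waits for the 3D engine to go idle so that
 * texture fetches issued afterwards see previously rendered data. */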

static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
}

static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST,
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}

unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
					   struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	if (rctx->framebuffer.dual_src_blend != blend->dual_src_blend) {
		rctx->framebuffer.dual_src_blend = blend->dual_src_blend;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}

static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}

static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}

void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}

void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}

static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}

void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
			S_028430_STENCILREF(a->state.ref_value[0]) |
			S_028430_STENCILMASK(a->state.valuemask[0]) |
			S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
			S_028434_STENCILREF_BF(a->state.ref_value[1]) |
			S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
			S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}

static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* work around some issue when not writing to zbuffer
			 * we are having lockup on evergreen so do not enable
			 * hyperz when not writing zbuffer
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}

static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale ||
	     rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_viewport_set_rast_deps(&rctx->b, rs->scissor_enable, rs->clip_halfz);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}

static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}

static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		LIST_DELINIT(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}
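
/* A sampler state with a border color takes 11 dwords to emit, one without
 * takes 5; the atom's num_dw is recomputed from the dirty mask accordingly. */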

void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
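
/* Bind sampler states, tracking which slots are enabled, which carry a
 * border color, and whether the seamless-cubemap setting changed (the
 * latter needs a pipeline flush on r6xx/r7xx). */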

static void r600_bind_sampler_states(struct pipe_context *pipe,
				     enum pipe_shader_type shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets 1-bit for states with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}

static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}

static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state*)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}

static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	free(dsa);
}

static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}

static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	r600_resource_reference(&shader->buffer, NULL);
	FREE(shader);
}

static void r600_set_index_buffer(struct pipe_context *ctx,
				  const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
		r600_context_add_resource_size(ctx, ib->buffer);
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}

void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
					util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}

static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
				if (input[i].buffer) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer, input[i].buffer);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer);
				} else {
					pipe_resource_reference(&vb[i].buffer, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}

void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_set_sampler_views(struct pipe_context *pipe,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->db_compatible) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-arrays textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}

static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}
715 static inline union r600_shader_key
r600_shader_selector_key(struct pipe_context
* ctx
,
716 struct r600_pipe_shader_selector
* sel
)
718 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
719 union r600_shader_key key
;
720 memset(&key
, 0, sizeof(key
));
723 case PIPE_SHADER_VERTEX
: {
724 key
.vs
.as_ls
= (rctx
->tes_shader
!= NULL
);
726 key
.vs
.as_es
= (rctx
->gs_shader
!= NULL
);
728 if (rctx
->ps_shader
->current
->shader
.gs_prim_id_input
&& !rctx
->gs_shader
) {
729 key
.vs
.as_gs_a
= true;
730 key
.vs
.prim_id_out
= rctx
->ps_shader
->current
->shader
.input
[rctx
->ps_shader
->current
->shader
.ps_prim_id_input
].spi_sid
;
734 case PIPE_SHADER_GEOMETRY
:
736 case PIPE_SHADER_FRAGMENT
: {
737 key
.ps
.color_two_side
= rctx
->rasterizer
&& rctx
->rasterizer
->two_side
;
738 key
.ps
.alpha_to_one
= rctx
->alpha_to_one
&&
739 rctx
->rasterizer
&& rctx
->rasterizer
->multisample_enable
&&
740 !rctx
->framebuffer
.cb0_is_integer
;
741 key
.ps
.nr_cbufs
= rctx
->framebuffer
.state
.nr_cbufs
;
742 /* Dual-source blending only makes sense with nr_cbufs == 1. */
743 if (key
.ps
.nr_cbufs
== 1 && rctx
->dual_src_blend
)
747 case PIPE_SHADER_TESS_EVAL
:
748 key
.tes
.as_es
= (rctx
->gs_shader
!= NULL
);
750 case PIPE_SHADER_TESS_CTRL
:
751 key
.tcs
.prim_mode
= rctx
->tes_shader
->info
.properties
[TGSI_PROPERTY_TES_PRIM_MODE
];

/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if current variant was changed */
static int r600_shader_select(struct pipe_context *ctx,
	struct r600_pipe_shader_selector* sel,
	bool *dirty)
{
	union r600_shader_key key;
	struct r600_pipe_shader * shader = NULL;
	int r;

	memset(&key, 0, sizeof(key));
	key = r600_shader_selector_key(ctx, sel);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			key = r600_shader_selector_key(ctx, sel);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}

static void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state,
			       unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	return sel;
}

static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}

static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}

static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}

static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
	rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}

static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}

static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}

static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}

static void r600_delete_shader_selector(struct pipe_context *ctx,
					struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}

static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
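
/* User constant buffers are uploaded through the stream uploader (with a
 * dword byte-swap on big-endian hosts); resource-backed buffers are simply
 * referenced and accounted for in the resource size. */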

static void r600_set_constant_buffer(struct pipe_context *ctx,
				     enum pipe_shader_type shader, uint index,
				     const struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(ctx->stream_uploader, 0, size, 256,
				      tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(ctx->stream_uploader, 0,
				      input->buffer_size, 256, ptr,
				      &cb->buffer_offset, &cb->buffer);
		}
		/* account it in gtt */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context*)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}

static void r600_update_driver_const_buffers(struct r600_context *rctx)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}

		if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}

		if (info->texture_const_dirty) {
			assert(ptr);
			assert(size);
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}
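
/* Driver constants are laid out with the R600_UCP_SIZE area (clip planes or
 * sample positions) first, followed by the per-sampler buffer constants;
 * *base_offset returns the start of that second area. */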

static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   int array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}

/*
 * On r600/700 hw we don't have vertex fetch swizzle, though TBO
 * doesn't require full swizzles it does need masking and setting alpha
 * to one, so we setup a set of 5 constants with the masks + alpha value
 * then in the shader, we AND the 4 components with 0xffffffff or 0,
 * then OR the alpha with the value given here.
 * We use a 6th constant to store the txq buffer size in
 * we use 7th slot for number of cube layers in a cube map array.
 */
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}

/* On evergreen we store two values
 * 1. buffer size for TXQ
 * 2. number of cube layers in a cube map array.
 */
static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 2 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i * 2;
			constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}

/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	int i;
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
	assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);

	memset(rctx->sample_positions, 0, 4 * 4 * 16);
	for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
		rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
	}

	rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
}

static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
	}
	r600_mark_atom_dirty(rctx, &state->atom);
}
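
/* Enable or disable the GS block: toggles the shader-stage and GS-ring
 * atoms, lazily allocates the ESGS/GSVS ring buffers on first use and
 * (un)binds them as ring constant buffers. */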

static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			size = 0x4000000;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, 0,
							PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			if (rctx->tes_shader) {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			} else {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			}
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
					R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}

static void r600_update_clip_state(struct r600_context *rctx,
				   struct r600_pipe_shader *current)
{
	if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
	    current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
	    current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
	    current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
		rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
		rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
		rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
		rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}
}
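
/* Build a pass-through TCS with ureg that copies the default tessellation
 * levels from the LDS info constant buffer to TESSOUTER/TESSINNER; used when
 * a TES is bound without an application-provided TCS. */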

static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!rctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 3, R600_LDS_INFO_CONST_BUFFER);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 2),
				    R600_LDS_INFO_CONST_BUFFER);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 3),
				    R600_LDS_INFO_CONST_BUFFER);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	rctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}

#define SELECT_SHADER_OR_FAIL(x) do {					\
		r600_shader_select(ctx, rctx->x##_shader, &x##_dirty);	\
		if (unlikely(!rctx->x##_shader->current))		\
			return false;					\
	} while(0)

#define UPDATE_SHADER(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
	} while(0)

#define UPDATE_SHADER_CLIP(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			clip_so_current = rctx->sw##_shader->current;	\
		}							\
	} while(0)

#define UPDATE_SHADER_GS(hw, hw2, sw) do {				\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \
			clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \
		}							\
	} while(0)

#define SET_NULL_SHADER(hw) do {					\
		if (rctx->hw_shader_stages[(hw)].shader)		\
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \
	} while(0)
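
/* Select shader variants for all bound stages and map them onto the hardware
 * stages (LS/HS/ES/GS/VS/PS) depending on whether tessellation and/or
 * geometry shading are enabled; returns false if rendering must be dropped. */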

static bool r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
	bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false;
	bool blend_disable;
	bool need_buf_const;
	struct r600_pipe_shader *clip_so_current = NULL;

	if (!rctx->blitter->running) {
		unsigned i;
		unsigned counter;

		counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
		if (counter != rctx->b.last_compressed_colortex_counter) {
			rctx->b.last_compressed_colortex_counter = counter;

			for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
				r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
			}
		}

		/* Decompress textures if needed. */
		for (i = 0; i < PIPE_SHADER_TYPES; i++) {
			struct r600_samplerview_state *views = &rctx->samplers[i].views;
			if (views->compressed_depthtex_mask) {
				r600_decompress_depth_textures(rctx, views);
			}
			if (views->compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, views);
			}
		}
	}

	SELECT_SHADER_OR_FAIL(ps);

	r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);

	update_gs_block_state(rctx, rctx->gs_shader != NULL);

	if (rctx->gs_shader)
		SELECT_SHADER_OR_FAIL(gs);

	if (rctx->tcs_shader) {
		SELECT_SHADER_OR_FAIL(tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, tcs);
	} else if (rctx->tes_shader) {
		if (!rctx->fixed_func_tcs_shader) {
			r600_generate_fixed_func_tcs(rctx);
			if (!rctx->fixed_func_tcs_shader)
				return false;
		}

		SELECT_SHADER_OR_FAIL(fixed_func_tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs);
	} else
		SET_NULL_SHADER(EG_HW_STAGE_HS);

	if (rctx->tes_shader) {
		SELECT_SHADER_OR_FAIL(tes);
	}

	SELECT_SHADER_OR_FAIL(vs);

	if (rctx->gs_shader) {
		if (!rctx->shader_stages.geom_enable) {
			rctx->shader_stages.geom_enable = true;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		/* gs_shader provides GS and VS (copy shader) */
		UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs);

		/* vs_shader is used as ES */

		if (rctx->tes_shader) {
			/* VS goes to LS, TES goes to ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			/* vs_shader is used as ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, vs);
			SET_NULL_SHADER(EG_HW_STAGE_LS);
		}
	} else {
		if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
			SET_NULL_SHADER(R600_HW_STAGE_GS);
			SET_NULL_SHADER(R600_HW_STAGE_ES);
			rctx->shader_stages.geom_enable = false;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		if (rctx->tes_shader) {
			/* if TES is loaded and no geometry, TES runs on hw VS, VS runs on hw LS */
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			SET_NULL_SHADER(EG_HW_STAGE_LS);
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs);
		}
	}

	/* Update clip misc state. */
	if (clip_so_current) {
		r600_update_clip_state(rctx, clip_so_current);
		rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask;
	}

	if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
		rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
		rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {

		if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
			rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
			r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
		}

		if (rctx->b.chip_class <= R700) {
			bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

			if (rctx->cb_misc_state.multiwrite != multiwrite) {
				rctx->cb_misc_state.multiwrite = multiwrite;
				r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
			}
		}

		if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
			((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
			 (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {

			if (rctx->b.chip_class >= EVERGREEN)
				evergreen_update_ps_state(ctx, rctx->ps_shader->current);
			else
				r600_update_ps_state(ctx, rctx->ps_shader->current);
		}

		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}
	UPDATE_SHADER(R600_HW_STAGE_PS, ps);

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_update_db_shader_control(rctx);
	} else {
		r600_update_db_shader_control(rctx);
	}

	/* on R600 we stuff masks + txq info into one constant buffer */
	/* on evergreen we only need a txq info one */
	if (rctx->ps_shader) {
		need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
		}
	}

	if (rctx->vs_shader) {
		need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
		}
	}

	if (rctx->gs_shader) {
		need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
		}
	}

	r600_update_driver_const_buffers(rctx);

	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		if (!r600_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	if (rctx->b.chip_class == EVERGREEN) {
		if (!evergreen_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	blend_disable = (rctx->dual_src_blend &&
			 rctx->ps_shader->current->nr_ps_color_outputs < 2);

	if (blend_disable != rctx->force_blend_disable) {
		rctx->force_blend_disable = blend_disable;
		r600_bind_blend_state_internal(rctx,
					       rctx->blend_state.cso,
					       blend_disable);
	}

	return true;
}

void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_clip_misc_state *state = &rctx->clip_misc_state;

	radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
			       state->pa_cl_clip_cntl |
			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
			       S_028810_CLIP_DISABLE(state->clip_disable));
	radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       state->pa_cl_vs_out_cntl |
			       (state->clip_plane_enable & state->clip_dist_write));
	/* reuse needs to be set off if we write oViewport */
	if (rctx->b.chip_class >= EVERGREEN)
		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
				       S_028AB4_REUSE_OFF(state->vs_out_viewport));
}
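
/* Common draw entry point: validates derived state, translates/uploads the
 * index buffer when needed, emits all dirty state atoms and then the draw
 * packets. */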
1672 static void r600_draw_vbo(struct pipe_context
*ctx
, const struct pipe_draw_info
*dinfo
)
1674 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
1675 struct pipe_draw_info info
= *dinfo
;
1676 struct pipe_index_buffer ib
= {};
1677 struct radeon_winsys_cs
*cs
= rctx
->b
.gfx
.cs
;
1678 bool render_cond_bit
= rctx
->b
.render_cond
&& !rctx
->b
.render_cond_force_off
;
1680 unsigned num_patches
, dirty_tex_counter
;
1682 if (!info
.indirect
&& !info
.count
&& (info
.indexed
|| !info
.count_from_stream_output
)) {
1686 if (!rctx
->vs_shader
|| !rctx
->ps_shader
) {
1691 /* make sure that the gfx ring is only one active */
1692 if (radeon_emitted(rctx
->b
.dma
.cs
, 0)) {
1693 rctx
->b
.dma
.flush(rctx
, RADEON_FLUSH_ASYNC
, NULL
);
1696 /* Re-emit the framebuffer state if needed. */
1697 dirty_tex_counter
= p_atomic_read(&rctx
->b
.screen
->dirty_tex_counter
);
1698 if (dirty_tex_counter
!= rctx
->b
.last_dirty_tex_counter
) {
1699 rctx
->b
.last_dirty_tex_counter
= dirty_tex_counter
;
1700 r600_mark_atom_dirty(rctx
, &rctx
->framebuffer
.atom
);
1703 if (!r600_update_derived_state(rctx
)) {
1704 /* useless to render because current rendering command
1711 /* Initialize the index buffer struct. */
1712 pipe_resource_reference(&ib
.buffer
, rctx
->index_buffer
.buffer
);
1713 ib
.user_buffer
= rctx
->index_buffer
.user_buffer
;
1714 ib
.index_size
= rctx
->index_buffer
.index_size
;
1715 ib
.offset
= rctx
->index_buffer
.offset
;
1716 if (!info
.indirect
) {
1717 ib
.offset
+= info
.start
* ib
.index_size
;
1720 /* Translate 8-bit indices to 16-bit. */
1721 if (unlikely(ib
.index_size
== 1)) {
1722 struct pipe_resource
*out_buffer
= NULL
;
1723 unsigned out_offset
;
1725 unsigned start
, count
;
1727 if (likely(!info
.indirect
)) {
1732 /* Have to get start/count from indirect buffer, slow path ahead... */
1733 struct r600_resource
*indirect_resource
= (struct r600_resource
*)info
.indirect
;
1734 unsigned *data
= r600_buffer_map_sync_with_rings(&rctx
->b
, indirect_resource
,
1735 PIPE_TRANSFER_READ
);
1737 data
+= info
.indirect_offset
/ sizeof(unsigned);
1738 start
= data
[2] * ib
.index_size
;
1747 u_upload_alloc(ctx
->stream_uploader
, start
, count
* 2,
1748 256, &out_offset
, &out_buffer
, &ptr
);
1750 util_shorten_ubyte_elts_to_userptr(
1751 &rctx
->b
.b
, &ib
, 0, 0, ib
.offset
+ start
, count
, ptr
);
1753 pipe_resource_reference(&ib
.buffer
, NULL
);
1754 ib
.user_buffer
= NULL
;
1755 ib
.buffer
= out_buffer
;
1756 ib
.offset
= out_offset
;
1760 /* Upload the index buffer.
1761 * The upload is skipped for small index counts on little-endian machines
1762 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
1763 * Indirect draws never use immediate indices.
1764 * Note: Instanced rendering in combination with immediate indices hangs. */
1765 if (ib
.user_buffer
&& (R600_BIG_ENDIAN
|| info
.indirect
||
1766 info
.instance_count
> 1 ||
1767 info
.count
*ib
.index_size
> 20)) {
1768 u_upload_data(ctx
->stream_uploader
, 0,
1769 info
.count
* ib
.index_size
, 256,
1770 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
1771 ib
.user_buffer
= NULL
;
1774 info
.index_bias
= info
.start
;

	/* Set the index offset and primitive restart. */
	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
	    rctx->vgt_state.vgt_indx_offset != info.index_bias ||
	    (rctx->vgt_state.last_draw_was_indirect && !info.indirect)) {
		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
		rctx->vgt_state.vgt_indx_offset = info.index_bias;
		r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
	}

	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
	if (rctx->b.chip_class == R600) {
		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (rctx->b.chip_class >= EVERGREEN)
		evergreen_setup_tess_constants(rctx, &info, &num_patches);

	/* Emit states. */
	r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
	r600_flush_emit(rctx);

	mask = rctx->dirty_atoms;
	while (mask != 0) {
		r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
	}

	if (rctx->b.chip_class == CAYMAN) {
		/* Copied from radeonsi. */
		unsigned primgroup_size = 128; /* recommended without a GS */
		bool ia_switch_on_eop = false;
		bool partial_vs_wave = false;

		if (rctx->gs_shader)
			primgroup_size = 64; /* recommended with a GS */

		if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
		    (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
			ia_switch_on_eop = true;
		}

		if (r600_get_strmout_en(&rctx->b))
			partial_vs_wave = true;

		radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
				       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
				       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
				       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
	}

	if (rctx->b.chip_class >= EVERGREEN) {
		uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, &info,
								   num_patches);

		evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
		evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc);
	}

	/* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
	 * even though it should have no effect on those. */
	if (rctx->b.chip_class == R600 && rctx->rasterizer) {
		unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
		unsigned prim = info.mode;

		if (rctx->gs_shader) {
			prim = rctx->gs_shader->gs_output_prim;
		}
		prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */

		if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
		    prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
		    info.mode == R600_PRIM_RECTANGLE_LIST) {
			su_sc_mode_cntl &= C_028814_CULL_FRONT;
		}
		radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
	}

	/* Update start instance. */
	if (!info.indirect && rctx->last_start_instance != info.start_instance) {
		radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		rctx->last_start_instance = info.start_instance;
	}

	/* Update the primitive type. */
	if (rctx->last_primitive_type != info.mode) {
		unsigned ls_mask = 0;

		if (info.mode == PIPE_PRIM_LINES)
			ls_mask = 1;
		else if (info.mode == PIPE_PRIM_LINE_STRIP ||
			 info.mode == PIPE_PRIM_LINE_LOOP)
			ls_mask = 2;

		radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
				       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
				       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
				      r600_conv_pipe_prim(info.mode));

		rctx->last_primitive_type = info.mode;
	}

	/* Draw packets. */
	if (!info.indirect) {
		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info.instance_count);
	}

	if (unlikely(info.indirect)) {
		uint64_t va = r600_resource(info.indirect)->gpu_address;
		assert(rctx->b.chip_class >= EVERGREEN);

		// Invalidate so non-indirect draw calls reset this state
		rctx->vgt_state.last_draw_was_indirect = true;
		rctx->last_start_instance = -1;

		radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
							  (struct r600_resource *)info.indirect,
							  RADEON_USAGE_READ,
							  RADEON_PRIO_DRAW_INDIRECT));
	}
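
	/* Emit the draw itself.  Indexed draws use PKT3_DRAW_INDEX_IMMD,
	 * PKT3_DRAW_INDEX or EG_PKT3_DRAW_INDEX_INDIRECT; non-indexed draws use
	 * PKT3_DRAW_INDEX_AUTO or EG_PKT3_DRAW_INDIRECT. */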
	if (info.indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
		radeon_emit(cs, ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));

		if (ib.user_buffer) {
			unsigned size_bytes = info.count * ib.index_size;
			unsigned size_dw = align(size_bytes, 4) / 4;
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
			radeon_emit(cs, info.count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
			radeon_emit_array(cs, ib.user_buffer, size_dw);
		} else {
			uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;

			if (likely(!info.indirect)) {
				radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);
				radeon_emit(cs, info.count);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));
			} else {
				uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);

				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
				radeon_emit(cs, max_size);

				radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
				radeon_emit(cs, info.indirect_offset);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
			}
		}
	} else {
		if (unlikely(info.count_from_stream_output)) {
			struct r600_so_target *t = (struct r600_so_target *)info.count_from_stream_output;
			uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;

			radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
			radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
			radeon_emit(cs, va & 0xFFFFFFFFUL);     /* src address lo */
			radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
			radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
			radeon_emit(cs, 0); /* unused */

			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
								  t->buf_filled_size, RADEON_USAGE_READ,
								  RADEON_PRIO_SO_FILLED_SIZE));
		}

		if (likely(!info.indirect)) {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info.count);
		} else {
			radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
			radeon_emit(cs, info.indirect_offset);
		}

		radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
	}

	/* Workaround: SMX returns CONTEXT_DONE too early */
	if (rctx->b.family == CHIP_R600 ||
	    rctx->b.family == CHIP_RV610 ||
	    rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV635) {
		/* if we have a gs shader or streamout enabled,
		 * we need to wait for idle after every draw */
		if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) {
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		}
	}

	/* ES ring rolling over at EOP - workaround */
	if (rctx->b.chip_class == R600) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}

	if (rctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	rctx->b.num_draw_calls++;
}
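
/* Translate a gallium PIPE_STENCIL_OP_* value into the hardware stencil-op
 * encoding (V_028800_STENCIL_*). */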
uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		break;
	}
	return 0;
}
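
/* Translate PIPE_POLYGON_MODE_* into the hardware polygon-mode (PTYPE)
 * encoding used when programming the rasterizer state. */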
uint32_t r600_translate_fill(uint32_t func)
{
	switch(func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}
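
/* Translate a gallium texture wrap mode into the SQ_TEX sampler clamp
 * encoding (V_03C000_SQ_TEX_*). */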
unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}
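
/* Translate a gallium mip filter into the SQ_TEX Z/mip filter encoding. */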
unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}
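
/* Translate a gallium compare function into the SQ_TEX depth-compare encoding. */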
unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}

static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
	       (linear_filter &&
		(wrap == PIPE_TEX_WRAP_CLAMP ||
		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}
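
/* A sampler needs a border color uploaded only if the border color is
 * non-zero and at least one of its wrap modes actually uses the border,
 * taking the filter mode into account. */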
bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

	return (state->border_color.ui[0] || state->border_color.ui[1] ||
		state->border_color.ui[2] || state->border_color.ui[3]) &&
	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}
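
/* Emit a shader stage: replay its precomputed register command buffer and
 * add the shader binary to the CS buffer list. */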
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;

	r600_emit_command_buffer(cs, &shader->command_buffer);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
						  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY));
}
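
/* Compose the format swizzle with the view swizzle and pack the result into
 * the per-channel selector fields of the texture (or, with vtx set, vertex)
 * resource words. */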
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
				   const unsigned char *swizzle_view,
				   boolean vtx)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t tex_swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t vtx_swizzle_shift[4] = {
		3, 6, 9, 12,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};
	const uint32_t *swizzle_shift = tex_swizzle_shift;

	if (vtx)
		swizzle_shift = vtx_swizzle_shift;

	if (swizzle_view) {
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case PIPE_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* PIPE_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}

/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p,
				  bool do_endian_swap)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	bool is_srgb_valid = FALSE;
	const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
	const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
	const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1};
	const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5};
	const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3};

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};

	/* Need to replace the specified texture formats in case of big-endian.
	 * These formats are formats that have channels with number of bits
	 * not divisible by 8.
	 * Mesa conversion functions don't swap bits for those formats, and because
	 * we transmit this over a serial bus to the GPU (PCIe), the
	 * bit-endianness is important!!!
	 * In case we have an "opposite" format, just use that for the swizzling
	 * information. If we don't have such an "opposite" format, we need
	 * to use a fixed swizzle info instead (see below).
	 */
	if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap)
		format = PIPE_FORMAT_A4R4_UNORM;

	desc = util_format_description(format);

	/* Depth and stencil swizzling is handled separately. */
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
		/* Need to check for specific texture formats that don't have
		 * an "opposite" format we can use. For those formats, we directly
		 * specify the swizzling, which is the LE swizzling as defined in
		 * u_format.csv.
		 */
		if (do_endian_swap) {
			if (format == PIPE_FORMAT_L4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE);
			else
				word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		} else {
			word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		}
	}

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		/* Depth sampler formats. */
		case PIPE_FORMAT_Z16_UNORM:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		/* Stencil sampler formats. */
		case PIPE_FORMAT_S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}
->layout
== UTIL_FORMAT_LAYOUT_RGTC
) {
2363 case PIPE_FORMAT_RGTC1_SNORM
:
2364 case PIPE_FORMAT_LATC1_SNORM
:
2365 word4
|= sign_bit
[0];
2366 case PIPE_FORMAT_RGTC1_UNORM
:
2367 case PIPE_FORMAT_LATC1_UNORM
:
2370 case PIPE_FORMAT_RGTC2_SNORM
:
2371 case PIPE_FORMAT_LATC2_SNORM
:
2372 word4
|= sign_bit
[0] | sign_bit
[1];
2373 case PIPE_FORMAT_RGTC2_UNORM
:
2374 case PIPE_FORMAT_LATC2_UNORM
:
2382 if (desc
->layout
== UTIL_FORMAT_LAYOUT_S3TC
) {
2383 if (!util_format_s3tc_enabled
) {
2388 case PIPE_FORMAT_DXT1_RGB
:
2389 case PIPE_FORMAT_DXT1_RGBA
:
2390 case PIPE_FORMAT_DXT1_SRGB
:
2391 case PIPE_FORMAT_DXT1_SRGBA
:
2393 is_srgb_valid
= TRUE
;
2395 case PIPE_FORMAT_DXT3_RGBA
:
2396 case PIPE_FORMAT_DXT3_SRGBA
:
2398 is_srgb_valid
= TRUE
;
2400 case PIPE_FORMAT_DXT5_RGBA
:
2401 case PIPE_FORMAT_DXT5_SRGBA
:
2403 is_srgb_valid
= TRUE
;
2410 if (desc
->layout
== UTIL_FORMAT_LAYOUT_BPTC
) {
2411 if (rscreen
->b
.chip_class
< EVERGREEN
)
2415 case PIPE_FORMAT_BPTC_RGBA_UNORM
:
2416 case PIPE_FORMAT_BPTC_SRGBA
:
2418 is_srgb_valid
= TRUE
;
2420 case PIPE_FORMAT_BPTC_RGB_FLOAT
:
2421 word4
|= sign_bit
[0] | sign_bit
[1] | sign_bit
[2];
2423 case PIPE_FORMAT_BPTC_RGB_UFLOAT
:

	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}

	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
:
2580 switch (desc
->channel
[i
].size
) {
2582 switch (desc
->nr_channels
) {
2584 result
= FMT_16_FLOAT
;
2587 result
= FMT_16_16_FLOAT
;
2590 result
= FMT_16_16_16_16_FLOAT
;
2595 switch (desc
->nr_channels
) {
2597 result
= FMT_32_FLOAT
;
2600 result
= FMT_32_32_FLOAT
;
2603 result
= FMT_32_32_32_32_FLOAT
;
2612 if (desc
->colorspace
== UTIL_FORMAT_COLORSPACE_SRGB
&& !is_srgb_valid
)
2617 *yuv_format_p
= yuv_format
;
2620 /* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
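
/* Translate a gallium pipe_format into the CB color format encoding
 * (V_0280A0_COLOR_*); returns ~0U if the format isn't supported. */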
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
				    bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
	 desc->channel[2].size == (z) && desc->channel[3].size == (w))

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
			return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8);
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}
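
/* Select the endian-swap mode for a color format; on little-endian builds
 * this always resolves to ENDIAN_NONE. */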
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
{
	if (R600_BIG_ENDIAN) {
		switch(colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<-->pipe format conversion
			 * already takes the endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
			return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE);

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<-->pipe format conversion
			 * already takes the endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
			return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE);

		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN16;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
		case V_0280A0_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;

		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}
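
/* Reallocate a buffer's storage in place and rebind the new storage
 * everywhere the old one was bound, marking the affected bindings dirty. */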
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(&rctx->screen->b, rbuffer);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}

	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i] &&
		    rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			uint64_t offset = view->base.u.buf.offset;
			uint64_t va = rbuffer->gpu_address + offset;

			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}
}
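
/* Toggle pipeline-statistics/streamout counters and occlusion queries when
 * the state tracker activates or deactivates queries. */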
static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	/* Pipeline stat & streamout queries. */
	if (enable) {
		rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
	} else {
		rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
	}

	/* Occlusion queries. */
	if (rctx->db_misc_state.occlusion_queries_disabled != !enable) {
		rctx->db_misc_state.occlusion_queries_disabled = !enable;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}

static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo);
}
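
/* Hook up the pipe_context and common-context entry points shared by all
 * r600-class chips. */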
/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_tcs_state = r600_create_tcs_state;
	rctx->b.b.create_tes_state = r600_create_tes_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.bind_tcs_state = r600_bind_tcs_state;
	rctx->b.b.bind_tes_state = r600_bind_tes_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
	rctx->b.b.delete_tes_state = r600_delete_tes_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_index_buffer = r600_set_index_buffer;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.set_active_query_state = r600_set_active_query_state;
	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}