/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"

#include "util/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}
void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}
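/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * command buffer is sized in dwords up front, and the CALLOC'd storage must
 * be returned with r600_release_command_buffer().
 */
#if 0
	struct r600_command_buffer cb;

	r600_init_command_buffer(&cb, 32);	/* room for 32 dwords */
	/* ... register writes are recorded into cb.buf ... */
	r600_release_command_buffer(&cb);
#endif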
void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
}
void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}
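/*
 * Illustrative registration (the atom id is a placeholder; real call sites
 * live in the chip-specific state files):
 */
#if 0
	r600_init_atom(rctx, &rctx->blend_color.atom, R600_ATOM_ID_EXAMPLE,
		       r600_emit_blend_color, 6 /* num_dw */);
#endif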
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
static void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
}
static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}
unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}
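/*
 * Illustrative lookups (not driver code): the two tables above map a Gallium
 * primitive to the native VGT primitive type, and to the primitive type that
 * reaches the rasterizer after GS/copy-shader processing, respectively.
 */
#if 0
	unsigned hw_prim = r600_conv_pipe_prim(PIPE_PRIM_TRIANGLES);
	/* hw_prim == V_008958_DI_PT_TRILIST */
	unsigned out_prim = r600_conv_prim_to_gs_out(PIPE_PRIM_QUADS);
	/* out_prim == V_028A6C_OUTPRIM_TYPE_TRISTRIP */
#endif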
/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
		struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
}
static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}
static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}
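/*
 * fui() reinterprets a float's bit pattern as a uint32_t, which is what the
 * CB_BLEND_* registers expect. A minimal equivalent, for illustration only:
 */
#if 0
static inline uint32_t fui_sketch(float f)
{
	union { float f; uint32_t ui; } u;

	u.f = f;
	return u.ui;	/* raw IEEE-754 bits */
}
#endif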
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}
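/*
 * radeon_set_context_reg_seq() opens a packet that writes 'count' consecutive
 * context registers starting at the given offset; each subsequent
 * radeon_emit() supplies one register dword, as in the pair above
 * (VGT_INDX_OFFSET followed by VGT_MULTI_PRIM_IB_RESET_INDX).
 */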
static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}
static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state *)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}
static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}
static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* work around some issue when not writing to zbuffer
			 * we are having lockup on evergreen so do not enable
			 * hyperz when not writing zbuffer
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}
static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale ||
	     rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_set_scissor_enable(&rctx->b, rs->scissor_enable);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}
static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}
static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		LIST_DELINIT(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}
void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
static void r600_bind_sampler_states(struct pipe_context *pipe,
				     unsigned shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state **)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets 1-bit for states with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	if (!states) {
		disable_mask = ~0u;
		count = 0;
	}

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}
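/*
 * Worked example of the mask bookkeeping above (illustrative): with
 * count == 2, disable_mask starts as ~0b11; binding a new state only in
 * slot 0 sets bit 0 of new_mask, so after the updates slot 0 is enabled and
 * dirty, slot 1 keeps its previous enabled/dirty bits, and every slot >= 2
 * is disabled. The same pattern is used for sampler views and vertex
 * buffers below.
 */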
static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}
static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}
static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	FREE(dsa);
}
static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	r600_resource_reference(&shader->buffer, NULL);
	FREE(shader);
}
static void r600_set_index_buffer(struct pipe_context *ctx,
				  const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
		r600_context_add_resource_size(ctx, ib->buffer);
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
					util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}
static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
				if (input[i].buffer) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer, input[i].buffer);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer);
				} else {
					pipe_resource_reference(&vb[i].buffer, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	if (!views) {
		disable_mask = ~0u;
		count = 0;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture *)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->db_compatible) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-arrays textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}
static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}
/* Compute the key for the hw shader variant */
static inline union r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
		struct r600_pipe_shader_selector * sel)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	union r600_shader_key key;
	memset(&key, 0, sizeof(key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX: {
		key.vs.as_ls = (rctx->tes_shader != NULL);
		if (!key.vs.as_ls)
			key.vs.as_es = (rctx->gs_shader != NULL);

		if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
			key.vs.as_gs_a = true;
			key.vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
		}
		break;
	}
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		key.ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key.ps.alpha_to_one = rctx->alpha_to_one &&
				      rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				      !rctx->framebuffer.cb0_is_integer;
		key.ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key.ps.nr_cbufs == 1 && rctx->dual_src_blend)
			key.ps.nr_cbufs = 2;
		break;
	}
	case PIPE_SHADER_TESS_EVAL:
		key.tes.as_es = (rctx->gs_shader != NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		key.tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	default:
		assert(0);
	}

	return key;
}
/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if the current variant was changed */
static int r600_shader_select(struct pipe_context *ctx,
	struct r600_pipe_shader_selector* sel,
	bool *dirty)
{
	union r600_shader_key key;
	struct r600_pipe_shader * shader = NULL;
	int r;

	memset(&key, 0, sizeof(key));
	key = r600_shader_selector_key(ctx, sel);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * path. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			key = r600_shader_selector_key(ctx, sel);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}
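/*
 * Note (illustrative): sel->current acts as a most-recently-used cache. A
 * variant found further down the singly-linked next_variant list is unlinked
 * and pushed to the front, so repeated draws with an unchanged key hit the
 * memcmp() fast path at the top of r600_shader_select().
 */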
static void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state,
			       unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	return sel;
}
static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}
static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}
static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}
static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
	rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}
static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}
static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}
static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}
static void r600_delete_shader_selector(struct pipe_context *ctx,
					struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}
static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}
static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
				     const struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->b.uploader, 0, size, 256, tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(rctx->b.uploader, 0, input->buffer_size, 256, ptr, &cb->buffer_offset, &cb->buffer);
		}
		/* account it in gtt */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}
static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context *)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}
static void r600_update_driver_const_buffers(struct r600_context *rctx)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}

		if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}

		if (info->texture_const_dirty) {
			assert(ptr);
			assert(size);
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}
static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   int array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}
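/*
 * Layout sketch of the driver-constant area managed above (illustrative):
 *
 *   bytes [0, R600_UCP_SIZE)             clip planes or sample positions
 *   bytes [R600_UCP_SIZE, +array_size)   per-sampler buffer constants
 *
 * Callers index from the returned base offset, e.g. dword offset
 * (base_offset / 4) + i * 8 for sampler i in r600_setup_buffer_constants().
 */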
/*
 * On r600/700 hw we don't have vertex fetch swizzle, though TBO
 * doesn't require full swizzles it does need masking and setting alpha
 * to one, so we setup a set of 5 constants with the masks + alpha value
 * then in the shader, we AND the 4 components with 0xffffffff or 0,
 * then OR the alpha with the value given here.
 * We use a 6th constant to store the txq buffer size in
 * we use 7th slot for number of cube layers in a cube map array.
 */
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}
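/*
 * Worked example (illustrative): for a two-channel float buffer format,
 * nr_channels == 2, so the 8 dwords written for sampler i are
 *   0xffffffff, 0xffffffff, 0x0, 0x0   (channel masks)
 *   fui(1.0)                           (alpha substitute, non-integer format)
 *   width0 / blocksize                 (TXQ buffer size)
 *   array_size / 6                     (cube map array layers)
 * with the 8th dword unused.
 */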
/* On evergreen we store two values
 * 1. buffer size for TXQ
 * 2. number of cube layers in a cube map array.
 */
static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 2 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i * 2;
			constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}
/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	int i;
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
	assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);

	memset(rctx->sample_positions, 0, 4 * 4 * 16);
	for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
		rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
	}

	rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
}
static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
	}
	r600_mark_atom_dirty(rctx, &state->atom);
}
static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			size = 0x4000000;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			if (rctx->tes_shader) {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
						R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			} else {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
						R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			}
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
					R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}
static void r600_update_clip_state(struct r600_context *rctx,
				   struct r600_pipe_shader *current)
{
	if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
	    current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
	    current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
	    current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
		rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
		rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
		rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
		rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}
}
static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!rctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 3, R600_LDS_INFO_CONST_BUFFER);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 2),
				    R600_LDS_INFO_CONST_BUFFER);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 3),
				    R600_LDS_INFO_CONST_BUFFER);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);

	ureg_END(ureg);

	rctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}
#define SELECT_SHADER_OR_FAIL(x) do {					\
		r600_shader_select(ctx, rctx->x##_shader, &x##_dirty);	\
		if (unlikely(!rctx->x##_shader->current))		\
			return false;					\
	} while(0)

#define UPDATE_SHADER(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
	} while(0)

#define UPDATE_SHADER_CLIP(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			clip_so_current = rctx->sw##_shader->current;	\
		}							\
	} while(0)

#define UPDATE_SHADER_GS(hw, hw2, sw) do {				\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \
			clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \
		}							\
	} while(0)

#define SET_NULL_SHADER(hw) do {					\
		if (rctx->hw_shader_stages[(hw)].shader)		\
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \
	} while(0)
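/*
 * Example expansion (illustrative): UPDATE_SHADER(R600_HW_STAGE_PS, ps)
 * becomes roughly the following, with ps_dirty having been set by a
 * preceding SELECT_SHADER_OR_FAIL(ps):
 */
#if 0
	if (ps_dirty || (rctx->hw_shader_stages[R600_HW_STAGE_PS].shader !=
			 rctx->ps_shader->current))
		update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_PS],
				   rctx->ps_shader->current);
#endif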
static bool r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context *)rctx;
	bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
	bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false;
	bool blend_disable;
	bool need_buf_const;
	struct r600_pipe_shader *clip_so_current = NULL;

	if (!rctx->blitter->running) {
		unsigned i;
		unsigned counter;

		/* Check if the sampler views changed behind our back. */
		counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
		if (counter != rctx->b.last_compressed_colortex_counter) {
			rctx->b.last_compressed_colortex_counter = counter;

			for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
				r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
			}
		}

		/* Decompress textures if needed. */
		for (i = 0; i < PIPE_SHADER_TYPES; i++) {
			struct r600_samplerview_state *views = &rctx->samplers[i].views;
			if (views->compressed_depthtex_mask) {
				r600_decompress_depth_textures(rctx, views);
			}
			if (views->compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, views);
			}
		}
	}

	SELECT_SHADER_OR_FAIL(ps);

	r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);

	update_gs_block_state(rctx, rctx->gs_shader != NULL);

	if (rctx->gs_shader)
		SELECT_SHADER_OR_FAIL(gs);

	/* Hull shader (TCS). */
	if (rctx->tcs_shader) {
		SELECT_SHADER_OR_FAIL(tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, tcs);
	} else if (rctx->tes_shader) {
		if (!rctx->fixed_func_tcs_shader) {
			r600_generate_fixed_func_tcs(rctx);
			if (!rctx->fixed_func_tcs_shader)
				return false;
		}

		SELECT_SHADER_OR_FAIL(fixed_func_tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs);
	} else
		SET_NULL_SHADER(EG_HW_STAGE_HS);

	if (rctx->tes_shader) {
		SELECT_SHADER_OR_FAIL(tes);
	}

	SELECT_SHADER_OR_FAIL(vs);

	if (rctx->gs_shader) {
		if (!rctx->shader_stages.geom_enable) {
			rctx->shader_stages.geom_enable = true;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		/* gs_shader provides GS and VS (copy shader) */
		UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs);

		/* vs_shader is used as ES */

		if (rctx->tes_shader) {
			/* VS goes to LS, TES goes to ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			/* vs_shader is used as ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, vs);
			SET_NULL_SHADER(EG_HW_STAGE_LS);
		}
	} else {
		if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
			SET_NULL_SHADER(R600_HW_STAGE_GS);
			SET_NULL_SHADER(R600_HW_STAGE_ES);
			rctx->shader_stages.geom_enable = false;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		if (rctx->tes_shader) {
			/* if TES is loaded and no geometry, TES runs on hw VS, VS runs on hw LS */
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			SET_NULL_SHADER(EG_HW_STAGE_LS);
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs);
		}
	}

	/* Update clip misc state. */
	if (clip_so_current) {
		r600_update_clip_state(rctx, clip_so_current);
		rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask;
	}

	if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
		rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
		rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {

		if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
			rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
			r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
		}

		if (rctx->b.chip_class <= R700) {
			bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

			if (rctx->cb_misc_state.multiwrite != multiwrite) {
				rctx->cb_misc_state.multiwrite = multiwrite;
				r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
			}
		}

		if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
				((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
				 (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {

			if (rctx->b.chip_class >= EVERGREEN)
				evergreen_update_ps_state(ctx, rctx->ps_shader->current);
			else
				r600_update_ps_state(ctx, rctx->ps_shader->current);
		}

		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}
	UPDATE_SHADER(R600_HW_STAGE_PS, ps);

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_update_db_shader_control(rctx);
	} else {
		r600_update_db_shader_control(rctx);
	}

	/* on R600 we stuff masks + txq info into one constant buffer */
	/* on evergreen we only need a txq info one */
	if (rctx->ps_shader) {
		need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
		}
	}

	if (rctx->vs_shader) {
		need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
		}
	}

	if (rctx->gs_shader) {
		need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
		}
	}

	r600_update_driver_const_buffers(rctx);

	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		if (!r600_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	if (rctx->b.chip_class == EVERGREEN) {
		if (!evergreen_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	blend_disable = (rctx->dual_src_blend &&
			 rctx->ps_shader->current->nr_ps_color_outputs < 2);

	if (blend_disable != rctx->force_blend_disable) {
		rctx->force_blend_disable = blend_disable;
		r600_bind_blend_state_internal(rctx,
					       rctx->blend_state.cso,
					       blend_disable);
	}

	return true;
}
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_clip_misc_state *state = &rctx->clip_misc_state;

	radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
			       state->pa_cl_clip_cntl |
			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
			       S_028810_CLIP_DISABLE(state->clip_disable));
	radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       state->pa_cl_vs_out_cntl |
			       (state->clip_plane_enable & state->clip_dist_write));
	/* reuse needs to be set off if we write oViewport */
	if (rctx->b.chip_class >= EVERGREEN)
		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
				       S_028AB4_REUSE_OFF(state->vs_out_viewport));
}
1663 static void r600_draw_vbo(struct pipe_context
*ctx
, const struct pipe_draw_info
*dinfo
)
1665 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
1666 struct pipe_draw_info info
= *dinfo
;
1667 struct pipe_index_buffer ib
= {};
1668 struct radeon_winsys_cs
*cs
= rctx
->b
.gfx
.cs
;
1669 bool render_cond_bit
= rctx
->b
.render_cond
&& !rctx
->b
.render_cond_force_off
;
1671 unsigned num_patches
, dirty_fb_counter
;
1673 if (!info
.indirect
&& !info
.count
&& (info
.indexed
|| !info
.count_from_stream_output
)) {
1677 if (!rctx
->vs_shader
|| !rctx
->ps_shader
) {
1682 /* make sure that the gfx ring is only one active */
1683 if (radeon_emitted(rctx
->b
.dma
.cs
, 0)) {
1684 rctx
->b
.dma
.flush(rctx
, RADEON_FLUSH_ASYNC
, NULL
);
1687 /* Re-emit the framebuffer state if needed. */
1688 dirty_fb_counter
= p_atomic_read(&rctx
->b
.screen
->dirty_fb_counter
);
1689 if (dirty_fb_counter
!= rctx
->b
.last_dirty_fb_counter
) {
1690 rctx
->b
.last_dirty_fb_counter
= dirty_fb_counter
;
1691 r600_mark_atom_dirty(rctx
, &rctx
->framebuffer
.atom
);
1694 if (!r600_update_derived_state(rctx
)) {
1695 /* useless to render because current rendering command
1702 /* Initialize the index buffer struct. */
1703 pipe_resource_reference(&ib
.buffer
, rctx
->index_buffer
.buffer
);
1704 ib
.user_buffer
= rctx
->index_buffer
.user_buffer
;
1705 ib
.index_size
= rctx
->index_buffer
.index_size
;
1706 ib
.offset
= rctx
->index_buffer
.offset
;
1707 if (!info
.indirect
) {
1708 ib
.offset
+= info
.start
* ib
.index_size
;
1711 /* Translate 8-bit indices to 16-bit. */
1712 if (unlikely(ib
.index_size
== 1)) {
1713 struct pipe_resource
*out_buffer
= NULL
;
1714 unsigned out_offset
;
1716 unsigned start
, count
;
1718 if (likely(!info
.indirect
)) {
1723 /* Have to get start/count from indirect buffer, slow path ahead... */
1724 struct r600_resource
*indirect_resource
= (struct r600_resource
*)info
.indirect
;
1725 unsigned *data
= r600_buffer_map_sync_with_rings(&rctx
->b
, indirect_resource
,
1726 PIPE_TRANSFER_READ
);
1728 data
+= info
.indirect_offset
/ sizeof(unsigned);
1729 start
= data
[2] * ib
.index_size
;
1738 u_upload_alloc(rctx
->b
.uploader
, start
, count
* 2, 256,
1739 &out_offset
, &out_buffer
, &ptr
);
1741 util_shorten_ubyte_elts_to_userptr(
1742 &rctx
->b
.b
, &ib
, 0, ib
.offset
+ start
, count
, ptr
);
1744 pipe_resource_reference(&ib
.buffer
, NULL
);
1745 ib
.user_buffer
= NULL
;
1746 ib
.buffer
= out_buffer
;
1747 ib
.offset
= out_offset
;
1751 /* Upload the index buffer.
1752 * The upload is skipped for small index counts on little-endian machines
1753 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
1754 * Indirect draws never use immediate indices.
1755 * Note: Instanced rendering in combination with immediate indices hangs. */
1756 if (ib
.user_buffer
&& (R600_BIG_ENDIAN
|| info
.indirect
||
1757 info
.instance_count
> 1 ||
1758 info
.count
*ib
.index_size
> 20)) {
1759 u_upload_data(rctx
->b
.uploader
, 0, info
.count
* ib
.index_size
, 256,
1760 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
1761 ib
.user_buffer
= NULL
;
1764 info
.index_bias
= info
.start
;
	/* Set the index offset and primitive restart. */
	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
	    rctx->vgt_state.vgt_indx_offset != info.index_bias ||
	    (rctx->vgt_state.last_draw_was_indirect && !info.indirect)) {
		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
		rctx->vgt_state.vgt_indx_offset = info.index_bias;
		r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
	}

	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
	if (rctx->b.chip_class == R600) {
		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (rctx->b.chip_class >= EVERGREEN)
		evergreen_setup_tess_constants(rctx, &info, &num_patches);

	/* Emit states. */
	r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
	r600_flush_emit(rctx);

	mask = rctx->dirty_atoms;
	while (mask != 0) {
		r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
	}
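	/* Cayman-only IA_MULTI_VGT_PARAM tuning, mirroring radeonsi: a smaller
	 * primgroup with a GS, SWITCH_ON_EOP when line stippling needs
	 * per-primitive resets (or when forced via DBG_SWITCH_ON_EOP), and
	 * PARTIAL_VS_WAVE whenever streamout is enabled. */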
	if (rctx->b.chip_class == CAYMAN) {
		/* Copied from radeonsi. */
		unsigned primgroup_size = 128; /* recommended without a GS */
		bool ia_switch_on_eop = false;
		bool partial_vs_wave = false;

		if (rctx->gs_shader)
			primgroup_size = 64; /* recommended with a GS */

		if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
		    (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
			ia_switch_on_eop = true;
		}

		if (r600_get_strmout_en(&rctx->b))
			partial_vs_wave = true;

		radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
				       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
				       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
				       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
	}
	if (rctx->b.chip_class >= EVERGREEN) {
		uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, &info,
								   num_patches);

		evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
		evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc);
	}

	/* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
	 * even though it should have no effect on those. */
	if (rctx->b.chip_class == R600 && rctx->rasterizer) {
		unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
		unsigned prim = info.mode;

		if (rctx->gs_shader) {
			prim = rctx->gs_shader->gs_output_prim;
		}
		prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */

		if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
		    prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
		    info.mode == R600_PRIM_RECTANGLE_LIST) {
			su_sc_mode_cntl &= C_028814_CULL_FRONT;
		}
		radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
	}
	/* Update start instance. */
	if (!info.indirect && rctx->last_start_instance != info.start_instance) {
		radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		rctx->last_start_instance = info.start_instance;
	}

	/* Update the primitive type. */
	if (rctx->last_primitive_type != info.mode) {
		unsigned ls_mask = 0;

		if (info.mode == PIPE_PRIM_LINES)
			ls_mask = 1;
		else if (info.mode == PIPE_PRIM_LINE_STRIP ||
			 info.mode == PIPE_PRIM_LINE_LOOP)
			ls_mask = 2;

		radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
				       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
				       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
				      r600_conv_pipe_prim(info.mode));

		rctx->last_primitive_type = info.mode;
	}
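	/* Draw packet selection. Direct draws program the instance count with
	 * PKT3_NUM_INSTANCES; indirect draws instead point the CP at the
	 * indirect buffer with EG_PKT3_SET_BASE and read their parameters
	 * from there, which is also why the cached start-instance value is
	 * invalidated below. */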
	if (!info.indirect) {
		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info.instance_count);
	}

	if (unlikely(info.indirect)) {
		uint64_t va = r600_resource(info.indirect)->gpu_address;
		assert(rctx->b.chip_class >= EVERGREEN);

		// Invalidate so non-indirect draw calls reset this state
		rctx->vgt_state.last_draw_was_indirect = true;
		rctx->last_start_instance = -1;

		radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
							  (struct r600_resource *)info.indirect,
							  RADEON_USAGE_READ,
							  RADEON_PRIO_DRAW_INDIRECT));
	}

	if (info.indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
		radeon_emit(cs, ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));

		if (ib.user_buffer) {
			unsigned size_bytes = info.count * ib.index_size;
			unsigned size_dw = align(size_bytes, 4) / 4;
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
			radeon_emit(cs, info.count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
			radeon_emit_array(cs, ib.user_buffer, size_dw);
		} else {
			uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;

			if (likely(!info.indirect)) {
				radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);
				radeon_emit(cs, info.count);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));
			} else {
				uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);

				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
				radeon_emit(cs, max_size);

				radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
				radeon_emit(cs, info.indirect_offset);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
			}
		}
	} else {
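		/* Non-indexed path. When the vertex count comes from streamout,
		 * PKT3_COPY_DW copies the filled-size counter into
		 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE so the VGT derives
		 * the count itself (USE_OPAQUE on the draw packet below). */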
		if (unlikely(info.count_from_stream_output)) {
			struct r600_so_target *t = (struct r600_so_target *)info.count_from_stream_output;
			uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;

			radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
			radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
			radeon_emit(cs, va & 0xFFFFFFFFUL);     /* src address lo */
			radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
			radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
			radeon_emit(cs, 0); /* unused */

			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
								  t->buf_filled_size, RADEON_USAGE_READ,
								  RADEON_PRIO_SO_FILLED_SIZE));
		}

		if (likely(!info.indirect)) {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info.count);
		} else {
			radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
			radeon_emit(cs, info.indirect_offset);
		}
		radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
	}

	/* SMX returns CONTEXT_DONE too early workaround */
	if (rctx->b.family == CHIP_R600 ||
	    rctx->b.family == CHIP_RV610 ||
	    rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV635) {
		/* if we have gs shader or streamout
		 * we need to do a wait idle after every draw */
		if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) {
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		}
	}

	/* ES ring rolling over at EOP - workaround */
	if (rctx->b.chip_class == R600) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	rctx->b.num_draw_calls++;
}
uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		assert(0);
		break;
	}
	return 0;
}
uint32_t r600_translate_fill(uint32_t func)
{
	switch (func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}
unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}
unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}
unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}
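/* Border-color handling: r600_tex_wrap() above maps PIPE_TEX_WRAP_CLAMP and
 * MIRROR_CLAMP to the hardware "half border" modes, so those wraps can also
 * sample the border color once a linear filter is in use. The two helpers
 * below decide when a border color actually needs to be programmed. */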
static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
	       (linear_filter &&
		(wrap == PIPE_TEX_WRAP_CLAMP ||
		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}

bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

	return (state->border_color.ui[0] || state->border_color.ui[1] ||
		state->border_color.ui[2] || state->border_color.ui[3]) &&
	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_pipe_shader *shader = ((struct r600_shader_state *)a)->shader;

	if (!shader)
		return;

	r600_emit_command_buffer(cs, &shader->command_buffer);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
						  RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER));
}
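/* The texture resource word and the vertex-fetch instruction pack four
 * destination selects at different bit offsets; that difference is all the
 * two shift tables below encode. The final swizzle is the composition of the
 * format swizzle with the sampler-view swizzle. */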
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
				   const unsigned char *swizzle_view,
				   boolean vtx)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t tex_swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t vtx_swizzle_shift[4] = {
		3, 6, 9, 12,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};
	const uint32_t *swizzle_shift = tex_swizzle_shift;

	if (vtx)
		swizzle_shift = vtx_swizzle_shift;

	if (swizzle_view) {
		/* Combine two sets of swizzles. */
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case PIPE_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* PIPE_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}
/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p,
				  bool do_endian_swap)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	bool is_srgb_valid = FALSE;
	const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
	const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
	const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1};
	const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5};
	const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3};

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};

	/* Need to replace the specified texture formats in case of big-endian.
	 * These formats are formats that have channels with number of bits
	 * not divisible by 8.
	 * Mesa conversion functions don't swap bits for those formats, and because
	 * we transmit this over a serial bus to the GPU (PCIe), the
	 * bit-endianness is important!!!
	 * In case we have an "opposite" format, just use that for the swizzling
	 * information. If we don't have such an "opposite" format, we need
	 * to use a fixed swizzle info instead (see below). */
	if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap)
		format = PIPE_FORMAT_A4R4_UNORM;

	desc = util_format_description(format);

	/* Depth and stencil swizzling is handled separately. */
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
		/* Need to check for specific texture formats that don't have
		 * an "opposite" format we can use. For those formats, we directly
		 * specify the swizzling, which is the LE swizzling as defined in
		 * u_format.csv. */
		if (do_endian_swap) {
			if (format == PIPE_FORMAT_L4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE);
			else
				word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		} else {
			word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		}
	}

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		/* Depth sampler formats. */
		case PIPE_FORMAT_Z16_UNORM:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		/* Stencil sampler formats. */
		case PIPE_FORMAT_S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through */
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through */
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
		if (rscreen->b.chip_class < EVERGREEN)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_BPTC_RGBA_UNORM:
		case PIPE_FORMAT_BPTC_SRGBA:
			result = FMT_BC7;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_BPTC_RGB_FLOAT:
			word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
			/* fall through */
		case PIPE_FORMAT_BPTC_RGB_UFLOAT:
			result = FMT_BC6;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}

	/* Find sign. */
	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch (desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		}
	}

out_word4:

	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		return ~0;
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;

out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}
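/* Color-buffer format translation. Unlike the texture path above, this only
 * has to pick one of the hardware COLOR_* layouts from the channel count and
 * per-channel sizes, hence the HAS_SIZE() helper instead of a per-format
 * table. */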
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
				    bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
         desc->channel[2].size == (z) && desc->channel[3].size == (w))

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
			return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8);
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
{
	if (R600_BIG_ENDIAN) {
		switch (colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as mesa<-->pipe formats conversion take into account
			 * the endianness
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
			return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE);

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as mesa<-->pipe formats conversion take into account
			 * the endianness
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
			return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE);

		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN16;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}
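/* Called when a buffer's backing storage is replaced: every binding that
 * cached the old GPU address (vertex buffers, streamout targets, constant
 * buffers, texture buffer objects) has to be redirtied or patched with the
 * new address. */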
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask, alignment = rbuffer->buf->alignment;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0,
			   alignment);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}
	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i] &&
		    rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->b.texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			uint64_t offset = view->base.u.buf.offset;
			uint64_t va = rbuffer->gpu_address + offset;

			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}
}
static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	/* Pipeline stat & streamout queries. */
	if (enable) {
		rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
	} else {
		rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
	}

	/* Occlusion queries. */
	if (rctx->db_misc_state.occlusion_queries_disabled != !enable) {
		rctx->db_misc_state.occlusion_queries_disabled = !enable;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}

static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context *)ctx, num_dw, include_draw_vbo);
}
/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_tcs_state = r600_create_tcs_state;
	rctx->b.b.create_tes_state = r600_create_tes_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.bind_tcs_state = r600_bind_tcs_state;
	rctx->b.b.bind_tes_state = r600_bind_tes_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
	rctx->b.b.delete_tes_state = r600_delete_tes_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_index_buffer = r600_set_index_buffer;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.set_active_query_state = r600_set_active_query_state;
	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}