/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "util/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
}

void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}
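
/*
 * Added usage sketch (not part of the original file; the atom id and dword
 * count below are placeholders): a state struct registers its atom once at
 * context init and then marks it dirty whenever its data changes; the draw
 * path later walks the dirty mask and calls atom->emit for each dirty atom.
 *
 *	r600_init_atom(rctx, &rctx->blend_color.atom, id,
 *		       r600_emit_blend_color, 6);
 *	...
 *	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
 */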

void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.gfx.cs, ((struct r600_cso_state *)atom)->cb);
}

void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state *)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}

static void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
}

static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST,
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}

unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/* common state between evergreen and r600 */
static void r600_bind_blend_state_internal(struct r600_context *rctx,
		struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
}

static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}

static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}

void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}

void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}

static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}

void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state *)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}

static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* work around some issue when not writing to zbuffer
			 * we are having lockup on evergreen so do not enable
			 * hyperz when not writing zbuffer
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}

static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_set_scissor_enable(&rctx->b, rs->scissor_enable);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}

static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}

static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		LIST_DELINIT(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}

void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_bind_sampler_states(struct pipe_context *pipe,
				     unsigned shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state **)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets 1-bit for states with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}
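
/* Added walk-through of the mask bookkeeping above: with count == 2 and
 * states == { A, NULL }, where slot 0 was previously bound to a different
 * sampler, the loop yields new_mask == 0x1 and disable_mask == ~0x3 | 0x2,
 * so slot 0 ends up enabled and dirty while slot 1 and every slot >= count
 * are disabled. */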

static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}

static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}

static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	FREE(dsa);
}

static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}

static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader *)state;
	pipe_resource_reference((struct pipe_resource **)&shader->buffer, NULL);
	FREE(shader);
}

static void r600_set_index_buffer(struct pipe_context *ctx,
				  const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
		r600_context_add_resource_size(ctx, ib->buffer);
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}

void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
			util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}

static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
				if (input[i].buffer) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer, input[i].buffer);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer);
				} else {
					pipe_resource_reference(&vb[i].buffer, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}

void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture *)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->is_depth && !rtex->is_flushing_texture) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-arrays textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}

static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}

/* Compute the key for the hw shader variant */
static inline union r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
		struct r600_pipe_shader_selector * sel)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	union r600_shader_key key;
	memset(&key, 0, sizeof(key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX: {
		key.vs.as_ls = (rctx->tes_shader != NULL);
		if (!key.vs.as_ls)
			key.vs.as_es = (rctx->gs_shader != NULL);

		if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
			key.vs.as_gs_a = true;
			key.vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
		}
		break;
	}
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		key.ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key.ps.alpha_to_one = rctx->alpha_to_one &&
				      rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				      !rctx->framebuffer.cb0_is_integer;
		key.ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key.ps.nr_cbufs == 1 && rctx->dual_src_blend)
			key.ps.nr_cbufs = 2;
		break;
	}
	case PIPE_SHADER_TESS_EVAL:
		key.tes.as_es = (rctx->gs_shader != NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		key.tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	default:
		assert(0);
	}

	return key;
}
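
/* Added example: binding a geometry shader makes key.vs.as_es true, so the
 * next draw selects (or compiles) a vertex-shader variant that feeds the
 * ESGS ring instead of the usual VS output path; unbinding the GS flips the
 * key back and reselects the plain variant. */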

/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if current variant was changed */
static int r600_shader_select(struct pipe_context *ctx,
			      struct r600_pipe_shader_selector *sel,
			      bool *dirty)
{
	union r600_shader_key key;
	struct r600_pipe_shader *shader = NULL;
	int r;

	memset(&key, 0, sizeof(key));
	key = r600_shader_selector_key(ctx, sel);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			key = r600_shader_selector_key(ctx, sel);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}
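
/* Added note on the variant cache above: a hit found by the while loop is
 * unlinked (p->next_variant = c->next_variant) and pushed to the list head
 * as sel->current, so repeatedly toggling between two states keeps both
 * variants at the front and avoids any recompilation. */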

static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1llu << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	return sel;
}

static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}

static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}

static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}

static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));
	rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}

static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}

static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}

static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}

static void r600_delete_shader_selector(struct pipe_context *ctx,
					struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}

static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
				     struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->b.uploader, 0, size, 256, tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(rctx->b.uploader, 0, input->buffer_size, 256, ptr, &cb->buffer_offset, &cb->buffer);
		}
		/* account it in gtt */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context *)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}

static void r600_update_driver_const_buffers(struct r600_context *rctx)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}

		if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}

		if (info->texture_const_dirty) {
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}

static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   int array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}
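
/* Added note: the first R600_UCP_SIZE bytes of this driver constant buffer
 * stay reserved for the user clip planes / sample positions that
 * r600_update_driver_const_buffers() copies in, which is why callers receive
 * *base_offset == R600_UCP_SIZE and the texture constants start after them. */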

/*
 * On r600/700 hw we don't have vertex fetch swizzle, though TBO
 * doesn't require full swizzles it does need masking and setting alpha
 * to one, so we setup a set of 5 constants with the masks + alpha value
 * then in the shader, we AND the 4 components with 0xffffffff or 0,
 * then OR the alpha with the value given here.
 * We use a 6th constant to store the txq buffer size in
 * we use 7th slot for number of cube layers in a cube map array.
 */
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}
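
/* Added illustration of the layout written above: for one enabled view of a
 * two-channel (RG) format, its 8-dword slot would hold
 *	{ 0xffffffff, 0xffffffff, 0, 0, fui(1.0) or 1, width0 / blocksize,
 *	  array_size / 6, 0 }
 * i.e. dwords 0-3 are the channel masks, 4 the alpha replacement, 5 the TXQ
 * buffer size and 6 the cube-array layer count. */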

/* On evergreen we store two values
 * 1. buffer size for TXQ
 * 2. number of cube layers in a cube map array.
 */
static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 2 * sizeof(uint32_t) * 4;

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i * 2;
			constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
}

/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	int i;
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
	assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);

	memset(rctx->sample_positions, 0, 4 * 4 * 16);
	for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
		rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
	}

	rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
}

static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
	}
	r600_mark_atom_dirty(rctx, &state->atom);
}

static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			size = 0x4000000;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			if (rctx->tes_shader) {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			} else {
				r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
							 R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
			}
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
						 R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}

static void r600_update_clip_state(struct r600_context *rctx,
				   struct r600_pipe_shader *current)
{
	if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
	    current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
	    current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
	    current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
		rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
		rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
		rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
		rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}
}

static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!rctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 3, R600_LDS_INFO_CONST_BUFFER);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 2),
				    R600_LDS_INFO_CONST_BUFFER);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 3),
				    R600_LDS_INFO_CONST_BUFFER);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	rctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &rctx->b.b);
}

#define SELECT_SHADER_OR_FAIL(x) do {					\
		r600_shader_select(ctx, rctx->x##_shader, &x##_dirty);	\
		if (unlikely(!rctx->x##_shader->current))		\
			return false;					\
	} while(0)

#define UPDATE_SHADER(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
	} while(0)

#define UPDATE_SHADER_CLIP(hw, sw) do {					\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			clip_so_current = rctx->sw##_shader->current;	\
		}							\
	} while(0)

#define UPDATE_SHADER_GS(hw, hw2, sw) do {				\
		if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \
			clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \
		}							\
	} while(0)

#define SET_NULL_SHADER(hw) do {					\
		if (rctx->hw_shader_stages[(hw)].shader)		\
			update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \
	} while(0)
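
/* Added expansion example: UPDATE_SHADER(R600_HW_STAGE_PS, ps) becomes
 *
 *	if (ps_dirty || (rctx->hw_shader_stages[R600_HW_STAGE_PS].shader !=
 *			 rctx->ps_shader->current))
 *		update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_PS],
 *				   rctx->ps_shader->current);
 *
 * so a hardware stage atom is only rewritten when its selected variant
 * actually changed. */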

static bool r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context *ctx = (struct pipe_context *)rctx;
	bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
	bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false;
	bool blend_disable;
	bool need_buf_const;
	struct r600_pipe_shader *clip_so_current = NULL;

	if (!rctx->blitter->running) {
		unsigned i;
		unsigned counter;

		counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
		if (counter != rctx->b.last_compressed_colortex_counter) {
			rctx->b.last_compressed_colortex_counter = counter;

			for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
				r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
			}
		}

		/* Decompress textures if needed. */
		for (i = 0; i < PIPE_SHADER_TYPES; i++) {
			struct r600_samplerview_state *views = &rctx->samplers[i].views;
			if (views->compressed_depthtex_mask) {
				r600_decompress_depth_textures(rctx, views);
			}
			if (views->compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, views);
			}
		}
	}

	SELECT_SHADER_OR_FAIL(ps);

	r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);

	update_gs_block_state(rctx, rctx->gs_shader != NULL);

	if (rctx->gs_shader)
		SELECT_SHADER_OR_FAIL(gs);

	if (rctx->tcs_shader) {
		SELECT_SHADER_OR_FAIL(tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, tcs);
	} else if (rctx->tes_shader) {
		if (!rctx->fixed_func_tcs_shader) {
			r600_generate_fixed_func_tcs(rctx);
			if (!rctx->fixed_func_tcs_shader)
				return false;
		}
		SELECT_SHADER_OR_FAIL(fixed_func_tcs);

		UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs);
	} else
		SET_NULL_SHADER(EG_HW_STAGE_HS);

	if (rctx->tes_shader) {
		SELECT_SHADER_OR_FAIL(tes);
	}

	SELECT_SHADER_OR_FAIL(vs);

	if (rctx->gs_shader) {
		if (!rctx->shader_stages.geom_enable) {
			rctx->shader_stages.geom_enable = true;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		/* gs_shader provides GS and VS (copy shader) */
		UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs);

		/* vs_shader is used as ES */

		if (rctx->tes_shader) {
			/* VS goes to LS, TES goes to ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			/* vs_shader is used as ES */
			UPDATE_SHADER(R600_HW_STAGE_ES, vs);
			SET_NULL_SHADER(EG_HW_STAGE_LS);
		}
	} else {
		if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
			SET_NULL_SHADER(R600_HW_STAGE_GS);
			SET_NULL_SHADER(R600_HW_STAGE_ES);
			rctx->shader_stages.geom_enable = false;
			r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
		}

		if (rctx->tes_shader) {
			/* if TES is loaded and no geometry, TES runs on hw VS, VS runs on hw LS */
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes);
			UPDATE_SHADER(EG_HW_STAGE_LS, vs);
		} else {
			SET_NULL_SHADER(EG_HW_STAGE_LS);
			UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs);
		}
	}

	/* Update clip misc state. */
	if (clip_so_current) {
		r600_update_clip_state(rctx, clip_so_current);
		rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask;
	}

	if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
		     rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
		     rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {

		if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
			rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
			r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
		}

		if (rctx->b.chip_class <= R700) {
			bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

			if (rctx->cb_misc_state.multiwrite != multiwrite) {
				rctx->cb_misc_state.multiwrite = multiwrite;
				r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
			}
		}

		if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
			     ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
			      (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {

			if (rctx->b.chip_class >= EVERGREEN)
				evergreen_update_ps_state(ctx, rctx->ps_shader->current);
			else
				r600_update_ps_state(ctx, rctx->ps_shader->current);
		}

		r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
	}
	UPDATE_SHADER(R600_HW_STAGE_PS, ps);

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_update_db_shader_control(rctx);
	} else {
		r600_update_db_shader_control(rctx);
	}

	/* on R600 we stuff masks + txq info into one constant buffer */
	/* on evergreen we only need a txq info one */
	if (rctx->ps_shader) {
		need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
		}
	}

	if (rctx->vs_shader) {
		need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
		}
	}

	if (rctx->gs_shader) {
		need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
		}
	}

	r600_update_driver_const_buffers(rctx);

	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		if (!r600_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	if (rctx->b.chip_class == EVERGREEN) {
		if (!evergreen_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	blend_disable = (rctx->dual_src_blend &&
			 rctx->ps_shader->current->nr_ps_color_outputs < 2);

	if (blend_disable != rctx->force_blend_disable) {
		rctx->force_blend_disable = blend_disable;
		r600_bind_blend_state_internal(rctx,
					       rctx->blend_state.cso,
					       blend_disable);
	}

	return true;
}

void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_clip_misc_state *state = &rctx->clip_misc_state;

	radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
			       state->pa_cl_clip_cntl |
			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
			       S_028810_CLIP_DISABLE(state->clip_disable));
	radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       state->pa_cl_vs_out_cntl |
			       (state->clip_plane_enable & state->clip_dist_write));
	/* reuse needs to be set off if we write oViewport */
	if (rctx->b.chip_class >= EVERGREEN)
		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
				       S_028AB4_REUSE_OFF(state->vs_out_viewport));
}

static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct pipe_index_buffer ib = {};
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
	uint64_t mask;
	unsigned num_patches, dirty_fb_counter;

	if (!info.indirect && !info.count && (info.indexed || !info.count_from_stream_output)) {
		return;
	}

	if (!rctx->vs_shader || !rctx->ps_shader) {
		assert(0);
		return;
	}

	/* make sure that the gfx ring is only one active */
	if (radeon_emitted(rctx->b.dma.cs, 0)) {
		rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
	}

	/* Re-emit the framebuffer state if needed. */
	dirty_fb_counter = p_atomic_read(&rctx->b.screen->dirty_fb_counter);
	if (dirty_fb_counter != rctx->b.last_dirty_fb_counter) {
		rctx->b.last_dirty_fb_counter = dirty_fb_counter;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}

	if (!r600_update_derived_state(rctx)) {
		/* useless to render because current rendering command
		 * can't be achieved
		 */
		return;
	}

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset;
		if (!info.indirect) {
			ib.offset += info.start * ib.index_size;
		}

		/* Translate 8-bit indices to 16-bit. */
		if (unlikely(ib.index_size == 1)) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset;
			void *ptr;
			unsigned start, count;

			if (likely(!info.indirect)) {
				start = 0;
				count = info.count;
			} else {
				/* Have to get start/count from indirect buffer, slow path ahead... */
				struct r600_resource *indirect_resource = (struct r600_resource *)info.indirect;
				unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
										 PIPE_TRANSFER_READ);
				if (data) {
					data += info.indirect_offset / sizeof(unsigned);
					start = data[2] * ib.index_size;
					count = data[0];
				} else {
					start = 0;
					count = 0;
				}
			}

			u_upload_alloc(rctx->b.uploader, start, count * 2, 256,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(
					&rctx->b.b, &ib, 0, ib.offset + start, count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			ib.offset = out_offset;
			ib.index_size = 2;
		}
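
		/* Added note on the 8->16-bit path above: count * 2 sizes the
		 * destination for 16-bit indices (two bytes each), and
		 * util_shorten_ubyte_elts_to_userptr widens each 8-bit index
		 * into that buffer, which is why index_size ends up as 2. */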
1749 /* Upload the index buffer.
1750 * The upload is skipped for small index counts on little-endian machines
1751 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
1752 * Indirect draws never use immediate indices.
1753 * Note: Instanced rendering in combination with immediate indices hangs. */
1754 if (ib
.user_buffer
&& (R600_BIG_ENDIAN
|| info
.indirect
||
1755 info
.instance_count
> 1 ||
1756 info
.count
*ib
.index_size
> 20)) {
1757 u_upload_data(rctx
->b
.uploader
, 0, info
.count
* ib
.index_size
, 256,
1758 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
1759 ib
.user_buffer
= NULL
;
1762 info
.index_bias
= info
.start
;
	/* Set the index offset and primitive restart. */
	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
	    rctx->vgt_state.vgt_indx_offset != info.index_bias ||
	    (rctx->vgt_state.last_draw_was_indirect && !info.indirect)) {
		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
		rctx->vgt_state.vgt_indx_offset = info.index_bias;
		r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
	}
	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
	if (rctx->b.chip_class == R600) {
		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	if (rctx->b.chip_class >= EVERGREEN)
		evergreen_setup_tess_constants(rctx, &info, &num_patches);
	r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
	r600_flush_emit(rctx);
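	/* Each set bit in dirty_atoms identifies a state atom that changed
	 * since the last draw; u_bit_scan64 pops one bit at a time, so only
	 * the dirty atoms are re-emitted into the command stream. */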
	mask = rctx->dirty_atoms;
	while (mask != 0) {
		r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
	}
	if (rctx->b.chip_class == CAYMAN) {
		/* Copied from radeonsi. */
		unsigned primgroup_size = 128; /* recommended without a GS */
		bool ia_switch_on_eop = false;
		bool partial_vs_wave = false;

		if (rctx->gs_shader)
			primgroup_size = 64; /* recommended with a GS */

		if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
		    (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
			ia_switch_on_eop = true;
		}

		if (r600_get_strmout_en(&rctx->b))
			partial_vs_wave = true;

		radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
				       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
				       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
				       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
	}
	if (rctx->b.chip_class >= EVERGREEN) {
		uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, &info,
								   num_patches);

		evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
		evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc);
	}
	/* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
	 * even though it should have no effect on those. */
	if (rctx->b.chip_class == R600 && rctx->rasterizer) {
		unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
		unsigned prim = info.mode;

		if (rctx->gs_shader) {
			prim = rctx->gs_shader->gs_output_prim;
		}
		prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */

		if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
		    prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
		    info.mode == R600_PRIM_RECTANGLE_LIST) {
			su_sc_mode_cntl &= C_028814_CULL_FRONT;
		}
		radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
	}
	/* Update start instance. */
	if (!info.indirect && rctx->last_start_instance != info.start_instance) {
		radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		rctx->last_start_instance = info.start_instance;
	}
	/* Update the primitive type. */
	if (rctx->last_primitive_type != info.mode) {
		unsigned ls_mask = 0;

		if (info.mode == PIPE_PRIM_LINES)
			ls_mask = 1;
		else if (info.mode == PIPE_PRIM_LINE_STRIP ||
			 info.mode == PIPE_PRIM_LINE_LOOP)
			ls_mask = 2;

		radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
				       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
				       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
				      r600_conv_pipe_prim(info.mode));

		rctx->last_primitive_type = info.mode;
	}
	if (!info.indirect) {
		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info.instance_count);
	}
	if (unlikely(info.indirect)) {
		uint64_t va = r600_resource(info.indirect)->gpu_address;
		assert(rctx->b.chip_class >= EVERGREEN);

		/* Invalidate so non-indirect draw calls reset this state. */
		rctx->vgt_state.last_draw_was_indirect = true;
		rctx->last_start_instance = -1;

		radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
							  (struct r600_resource *)info.indirect,
							  RADEON_USAGE_READ,
							  RADEON_PRIO_DRAW_INDIRECT));
	}
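	/* Emit the actual draw packets. For indexed draws, PKT3_INDEX_TYPE
	 * selects the index width and, on big-endian hosts, the DMA swap
	 * mode the index fetcher should apply. */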
	if (info.indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
		radeon_emit(cs, ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
		if (ib.user_buffer) {
			unsigned size_bytes = info.count * ib.index_size;
			unsigned size_dw = align(size_bytes, 4) / 4;
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
			radeon_emit(cs, info.count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
			radeon_emit_array(cs, ib.user_buffer, size_dw);
		} else {
			uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;

			if (likely(!info.indirect)) {
				radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);
				radeon_emit(cs, info.count);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));
			}
			else {
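				/* For indirect draws the CP fetches the draw
				 * parameters itself, so the index buffer base
				 * and its size in indices are programmed up
				 * front; max_size presumably lets the hardware
				 * clamp out-of-bounds index fetches. */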
				uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, (va >> 32UL) & 0xFF);

				radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
				radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
									  (struct r600_resource *)ib.buffer,
									  RADEON_USAGE_READ,
									  RADEON_PRIO_INDEX_BUFFER));

				radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
				radeon_emit(cs, max_size);

				radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
				radeon_emit(cs, info.indirect_offset);
				radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
			}
		}
	} else {
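		/* Transform feedback draws: the vertex count is not known on
		 * the CPU, so the byte count written by streamout
		 * (buf_filled_size) is copied into a VGT register and the
		 * draw below uses USE_OPAQUE to derive the count from it. */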
		if (unlikely(info.count_from_stream_output)) {
			struct r600_so_target *t = (struct r600_so_target *)info.count_from_stream_output;
			uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;

			radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
			radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
			radeon_emit(cs, va & 0xFFFFFFFFUL);     /* src address lo */
			radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
			radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
			radeon_emit(cs, 0); /* unused */

			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
								  t->buf_filled_size, RADEON_USAGE_READ,
								  RADEON_PRIO_SO_FILLED_SIZE));
		}
		if (likely(!info.indirect)) {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info.count);
		}
		else {
			radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
			radeon_emit(cs, info.indirect_offset);
		}
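		/* Both paths end with the same source select: auto-generated
		 * indices, optionally marked opaque for the streamout case
		 * handled above. */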
		radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0));
	}
	/* SMX returns CONTEXT_DONE too early workaround */
	if (rctx->b.family == CHIP_R600 ||
	    rctx->b.family == CHIP_RV610 ||
	    rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV635) {
		/* If we have a GS shader or streamout enabled,
		 * we need to wait for idle after every draw. */
		if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) {
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
		}
	}
	/* ES ring rolling over at EOP - workaround */
	if (rctx->b.chip_class == R600) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
	}
	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}
	pipe_resource_reference(&ib.buffer, NULL);
	rctx->b.num_draw_calls++;
}
uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		assert(0);
		break;
	}
	return 0;
}
uint32_t r600_translate_fill(uint32_t func)
{
	switch (func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}
unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}
unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}
unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}
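/* With linear filtering, CLAMP and MIRROR_CLAMP can still fetch border
 * texels (the filter footprint extends past the last texel), which is why
 * linear_filter widens the set of wrap modes that need a border color. */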
static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
	       (linear_filter &&
		(wrap == PIPE_TEX_WRAP_CLAMP ||
		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}
bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

	return (state->border_color.ui[0] || state->border_color.ui[1] ||
		state->border_color.ui[2] || state->border_color.ui[3]) &&
	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_pipe_shader *shader = ((struct r600_shader_state *)a)->shader;

	if (!shader)
		return;

	r600_emit_command_buffer(cs, &shader->command_buffer);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
						  RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER));
}
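/* Packs a 4-component swizzle into the per-component DST_SEL fields of a
 * texture (or vertex-fetch) resource word; the shift tables below hold the
 * field offsets for each destination component. For example, composing a
 * format's identity swizzle with an identity view swizzle simply emits
 * SEL_X..SEL_W in order. */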
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
				   const unsigned char *swizzle_view,
				   boolean vtx)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t tex_swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t vtx_swizzle_shift[4] = {
		3, 6, 9, 12,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};
	const uint32_t *swizzle_shift = tex_swizzle_shift;

	if (vtx)
		swizzle_shift = vtx_swizzle_shift;

	if (swizzle_view) {
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case PIPE_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case PIPE_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* PIPE_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
			break;
		}
	}
	return result;
}
/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p,
				  bool do_endian_swap)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	bool is_srgb_valid = FALSE;
	const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
	const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
	const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1};
	const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5};
	const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3};

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};

	/* Need to replace the specified texture formats in case of big-endian.
	 * These formats are formats that have channels with a number of bits
	 * not divisible by 8.
	 * Mesa conversion functions don't swap bits for those formats, and because
	 * we transmit this over a serial bus to the GPU (PCIe), the
	 * bit-endianness is important!
	 * In case we have an "opposite" format, just use that for the swizzling
	 * information. If we don't have such an "opposite" format, we need
	 * to use fixed swizzle info instead (see below).
	 */
	if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap)
		format = PIPE_FORMAT_A4R4_UNORM;
	desc = util_format_description(format);

	/* Depth and stencil swizzling is handled separately. */
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
		/* Need to check for specific texture formats that don't have
		 * an "opposite" format we can use. For those formats, we directly
		 * specify the swizzling, which is the LE swizzling as defined in
		 * u_format.csv. */
		if (do_endian_swap) {
			if (format == PIPE_FORMAT_L4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4A4_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE);
			else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM)
				word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE);
			else
				word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		} else {
			word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
		}
	}
	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		/* Depth sampler formats. */
		case PIPE_FORMAT_Z16_UNORM:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		/* Stencil sampler formats. */
		case PIPE_FORMAT_S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}
	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through */
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through */
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}
	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}
	if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
		if (rscreen->b.chip_class < EVERGREEN)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_BPTC_RGBA_UNORM:
		case PIPE_FORMAT_BPTC_SRGBA:
			result = FMT_BC7;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_BPTC_RGB_FLOAT:
			word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
			/* fall through */
		case PIPE_FORMAT_BPTC_RGB_UFLOAT:
			result = FMT_BC6;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}
	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}
	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}

	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}
	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}
	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch (desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}
	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;
	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

out_word4:

	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		goto out_unknown;

	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
				    bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
	 desc->channel[2].size == (z) && desc->channel[3].size == (w))
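/* HAS_SIZE matches the per-channel bit widths of a plain format against the
 * given pattern; e.g. HAS_SIZE(5,6,5,0) is true for B5G6R5-style formats.
 * It captures the local 'desc' variable, so it is only valid inside this
 * function. */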
	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
			return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8);
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}
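/* On big-endian hosts, colorbuffer accesses go through one of the CB endian
 * swap modes: ENDIAN_8IN16 swaps bytes within each 16-bit word and
 * ENDIAN_8IN32 within each 32-bit word; array-of-bytes formats need no
 * swap at all. */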
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
{
	if (R600_BIG_ENDIAN) {
		switch (colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<-->pipe format conversions take the
			 * endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
			return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE);

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<-->pipe format conversions take the
			 * endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
			return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE);

		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN16;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
		case V_0280A0_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask, alignment = rbuffer->buf->alignment;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0,
			   alignment);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}
	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i] &&
		    rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->b.texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			unsigned stride = util_format_get_blocksize(view->base.format);
			uint64_t offset = (uint64_t)view->base.u.buf.first_element * stride;
			uint64_t va = rbuffer->gpu_address + offset;

			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}
}
static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	/* Pipeline stat & streamout queries. */
	if (enable) {
		rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
	} else {
		rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
	}

	/* Occlusion queries. */
	if (rctx->db_misc_state.occlusion_queries_disabled != !enable) {
		rctx->db_misc_state.occlusion_queries_disabled = !enable;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context *)ctx, num_dw, include_draw_vbo);
}
/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_tcs_state = r600_create_tcs_state;
	rctx->b.b.create_tes_state = r600_create_tes_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.bind_tcs_state = r600_bind_tcs_state;
	rctx->b.b.bind_tes_state = r600_bind_tes_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
	rctx->b.b.delete_tes_state = r600_delete_tes_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_index_buffer = r600_set_index_buffer;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.set_active_query_state = r600_set_active_query_state;
	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}