/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "r600_formats.h"
24 #include "r600_shader.h"
27 #include "pipe/p_shader_tokens.h"
28 #include "util/u_pack_color.h"
29 #include "util/u_memory.h"
30 #include "util/u_framebuffer.h"
31 #include "util/u_dual_blend.h"
static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028804_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028804_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028804_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028804_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028804_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}
static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028804_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028804_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028804_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028804_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028804_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028804_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028804_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028804_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028804_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028804_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028804_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028804_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028804_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028804_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028804_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028804_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028804_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}
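/* Worked example (illustrative only, derived from the two translators above
 * and assembled by r600_get_blend_control() further down): classic
 * non-premultiplied alpha blending -- src factor SRC_ALPHA, dst factor
 * INV_SRC_ALPHA, blend func ADD -- yields a CB_BLEND_CONTROL value of
 *
 *   S_028804_COLOR_COMB_FCN(V_028804_COMB_DST_PLUS_SRC) |
 *   S_028804_COLOR_SRCBLEND(V_028804_BLEND_SRC_ALPHA) |
 *   S_028804_COLOR_DESTBLEND(V_028804_BLEND_ONE_MINUS_SRC_ALPHA)
 *
 * with no SEPARATE_ALPHA_BLEND bit, since the alpha factors match the RGB
 * ones.
 */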
static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples)
{
	switch (dim) {
	default:
	case PIPE_TEXTURE_1D:
		return V_038000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_038000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA :
					V_038000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_038000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_038000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_038000_SQ_TEX_DIM_CUBEMAP;
	}
}
static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028010_DEPTH_16;
	case PIPE_FORMAT_Z24X8_UNORM:
		return V_028010_DEPTH_X8_24;
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		return V_028010_DEPTH_8_24;
	case PIPE_FORMAT_Z32_FLOAT:
		return V_028010_DEPTH_32_FLOAT;
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028010_DEPTH_X24_8_32_FLOAT;
	default:
		return ~0U;
	}
}
static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
					FALSE) != ~0U;
}
static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
	       r600_translate_colorswap(format, FALSE) != ~0U;
}
static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}
boolean r600_is_format_supported(struct pipe_screen *screen,
				 enum pipe_format format,
				 enum pipe_texture_target target,
				 unsigned sample_count,
				 unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return FALSE;
	}

	if (!util_format_is_supported(format, usage))
		return FALSE;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return FALSE;

		/* R11G11B10 is broken on R6xx. */
		if (rscreen->b.chip_class == R600 &&
		    format == PIPE_FORMAT_R11G11B10_FLOAT)
			return FALSE;

		/* MSAA integer colorbuffers hang. */
		if (util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			return FALSE;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return FALSE;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if ((usage & PIPE_BIND_LINEAR) &&
	    !util_format_is_compressed(format) &&
	    !(usage & PIPE_BIND_DEPTH_STENCIL))
		retval |= PIPE_BIND_LINEAR;

	return retval == usage;
}
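/* Note on the return convention above: the query reports success only when
 * every requested bind flag was granted, i.e. retval must equal the full
 * usage mask.  A caller asking for, say,
 * PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET gets TRUE only if the
 * format passed both the sampler and the colorbuffer checks.
 */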
static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;
	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;

	if (!state->offset_units_unscaled) {
		switch (state->zs_format) {
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			offset_units *= 2.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
			break;
		case PIPE_FORMAT_Z16_UNORM:
			offset_units *= 4.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
			break;
		default:
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
				S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		}
	}

	radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));

	radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       pa_su_poly_offset_db_fmt_cntl);
}
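/* Sketch of the encoding above, inferred from the switch rather than from
 * extra hardware documentation: POLY_OFFSET_NEG_NUM_DB_BITS tells the depth
 * block the effective precision of the Z format as a negative bit count, so
 * the fixed-function offset unit can scale the units value correctly.  A Z24
 * buffer encodes (char)-24 with offset_units doubled; a float Z buffer
 * encodes -23 (the IEEE 754 mantissa width) plus the DB_IS_FLOAT_FMT bit.
 */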
static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
{
	int j = state->independent_blend_enable ? i : 0;

	unsigned eqRGB = state->rt[j].rgb_func;
	unsigned srcRGB = state->rt[j].rgb_src_factor;
	unsigned dstRGB = state->rt[j].rgb_dst_factor;

	unsigned eqA = state->rt[j].alpha_func;
	unsigned srcA = state->rt[j].alpha_src_factor;
	unsigned dstA = state->rt[j].alpha_dst_factor;
	uint32_t bc = 0;

	if (!state->rt[j].blend_enable)
		return 0;

	bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
	bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
	bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

	if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
		bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
		bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
		bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
		bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
	}
	return bc;
}
static void *r600_create_blend_state_mode(struct pipe_context *ctx,
					  const struct pipe_blend_state *state,
					  int mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	/* R600 does not support per-MRT blends */
	if (rctx->b.family > CHIP_R600)
		color_control |= S_028808_PER_MRT_BLEND(1);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* we pretend 8 buffers are used; CB_SHADER_MASK will disable the unused ones */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			if (state->rt[i].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			if (state->rt[0].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	if (target_mask)
		color_control |= S_028808_SPECIAL_OP(mode);
	else
		color_control |= S_028808_SPECIAL_OP(V_028808_DISABLE);

	/* only MRT0 has dual src blend */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->cb_color_control = color_control;
	blend->cb_color_control_no_blend = color_control & C_028808_TARGET_BLEND_ENABLE;
	blend->alpha_to_one = state->alpha_to_one;

	r600_store_context_reg(&blend->buffer, R_028D44_DB_ALPHA_TO_MASK,
			       S_028D44_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028D44_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET3(2));

	/* Copy over the registers set so far into buffer_no_blend. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;

	/* Only add blend registers if blending is enabled. */
	if (!G_028808_TARGET_BLEND_ENABLE(color_control)) {
		return blend;
	}

	/* The first R600 does not support per-MRT blends */
	r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL,
			       r600_get_blend_control(state, 0));

	if (rctx->b.family > CHIP_R600) {
		r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
		for (int i = 0; i < 8; i++) {
			r600_store_value(&blend->buffer, r600_get_blend_control(state, i));
		}
	}
	return blend;
}
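/* Design note, inferred from the code above: blend->buffer_no_blend is kept
 * as a second pre-baked command buffer holding everything except the
 * CB_BLEND*_CONTROL registers, so the driver can cheaply switch to a
 * "blending disabled" variant of the same state object without translating
 * the pipe_blend_state again.
 */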
static void *r600_create_blend_state(struct pipe_context *ctx,
				     const struct pipe_blend_state *state)
{
	return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL);
}
static void *r600_create_dsa_state(struct pipe_context *ctx,
				   const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (!dsa) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth.writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
		S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
		S_028800_ZFUNC(state->depth.func);

	/* stencil */
	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	/* alpha */
	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha.enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha.ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}
static void *r600_create_rs_state(struct pipe_context *ctx,
				  const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, sc_mode_cntl, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (!rs) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->scissor_enable = state->scissor;
	rs->clip_halfz = state->clip_halfz;
	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
	if (rctx->b.chip_class == R700) {
		rs->pa_cl_clip_cntl |=
			S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	}
	rs->multisample_enable = state->multisample;

	/* offset */
	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 16.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
	rs->offset_units_unscaled = state->offset_units_unscaled;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) |
		       S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
		       S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		       S_028A4C_PS_ITER_SAMPLE(state->multisample && rctx->ps_iter_samples > 1);
	if (rctx->b.family == CHIP_RV770) {
		/* work around possible rendering corruption on RV770 with hyperz together with sample shading */
		sc_mode_cntl |= S_028A4C_TILE_COVER_DISABLE(state->multisample && rctx->ps_iter_samples > 1);
	}
	if (rctx->b.chip_class >= R700) {
		sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
				S_028A4C_R700_ZMM_LINE_OFFSET(1) |
				S_028A4C_R700_VPORT_SCISSOR_ENABLE(1);
	} else {
		sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	if (state->sprite_coord_enable) {
		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
			      S_0286D4_PNT_SPRITE_OVRD_W(1);
		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
		}
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH(r600_pack_float_12p4(state->line_width/2)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl);
	r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
			       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
			       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));

	rs->pa_su_sc_mode_cntl = S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
				 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) |
				 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) |
				 S_028814_FACE(!state->front_ccw) |
				 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
				 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
				 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
				 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						    state->fill_back != PIPE_POLYGON_MODE_FILL) |
				 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
				 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back));
	if (rctx->b.chip_class == R700) {
		r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL, rs->pa_su_sc_mode_cntl);
	}
	if (rctx->b.chip_class == R600) {
		r600_store_context_reg(&rs->buffer, R_028350_SX_MISC,
				       S_028350_MULTIPASS(state->rasterizer_discard));
	}
	return rs;
}
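/* Worked example for the 12.4 fixed-point fields above (illustrative):
 * r600_pack_float_12p4() stores a size in units of 1/16 pixel, and the
 * PA_SU_POINT_SIZE fields take half-sizes (0.5 = one pixel, per the comment
 * above).  A 10-pixel point therefore stores 10/2 = 5.0, which encodes as
 * 5 * 16 = 80.
 */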
static unsigned r600_tex_filter(unsigned filter, unsigned max_aniso)
{
	if (filter == PIPE_TEX_FILTER_LINEAR)
		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_BILINEAR
				     : V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
	else
		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_POINT
				     : V_03C000_SQ_TEX_XY_FILTER_POINT;
}
static void *r600_create_sampler_state(struct pipe_context *ctx,
				       const struct pipe_sampler_state *state)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
						       : state->max_anisotropy;
	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);

	if (!ss) {
		return NULL;
	}

	ss->seamless_cube_map = state->seamless_cube_map;
	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter, max_aniso)) |
		S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter, max_aniso)) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) |
		S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] = S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}
static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)
{
	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
	int stride = util_format_get_blocksize(view->base.format);
	unsigned format, num_format, format_comp, endian;
	uint64_t offset = view->base.u.buf.offset;
	unsigned size = view->base.u.buf.size;

	r600_vertex_data_type(view->base.format,
			      &format, &num_format, &format_comp,
			      &endian);

	view->tex_resource = &tmp->resource;
	view->skip_mip_address_reloc = true;

	view->tex_resource_words[0] = offset;
	view->tex_resource_words[1] = size - 1;
	view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(offset >> 32UL) |
		S_038008_STRIDE(stride) |
		S_038008_DATA_FORMAT(format) |
		S_038008_NUM_FORMAT_ALL(num_format) |
		S_038008_FORMAT_COMP_ALL(format_comp) |
		S_038008_ENDIAN_SWAP(endian);
	view->tex_resource_words[3] = 0;
	/*
	 * in theory dword 4 is for number of elements, for use with resinfo,
	 * but it seems to utterly fail to work, the amd gpu shader analyser
	 * uses a const buffer to store the element sizes for buffer txq
	 */
	view->tex_resource_words[4] = 0;
	view->tex_resource_words[5] = 0;
	view->tex_resource_words[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER);

	return &view->base;
}
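/* Layout recap for the buffer case above (as the code uses it, not an
 * exhaustive register description): WORD0 is the byte offset into the
 * buffer, WORD1 the last addressable byte (size - 1), and WORD2 packs the
 * element stride together with the vertex-fetch style format/endian fields.
 * A buffer sampler view is thus programmed essentially like a vertex fetch
 * resource.
 */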
struct pipe_sampler_view *
r600_create_sampler_view_custom(struct pipe_context *ctx,
				struct pipe_resource *texture,
				const struct pipe_sampler_view *state,
				unsigned width_first_level, unsigned height_first_level)
{
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char swizzle[4], array_mode = 0;
	unsigned width, height, depth, offset_level, last_level;
	bool do_endian_swap = FALSE;

	if (!view)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (texture->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(view, texture->width0, 1);

	swizzle[0] = state->swizzle_r;
	swizzle[1] = state->swizzle_g;
	swizzle[2] = state->swizzle_b;
	swizzle[3] = state->swizzle_a;

	if (R600_BIG_ENDIAN)
		do_endian_swap = !tmp->db_compatible;

	format = r600_translate_texformat(ctx->screen, state->format,
					  swizzle,
					  &word4, &yuv_format, do_endian_swap);
	assert(format != ~0);
	if (format == ~0) {
		FREE(view);
		return NULL;
	}

	if (state->format == PIPE_FORMAT_X24S8_UINT ||
	    state->format == PIPE_FORMAT_S8X24_UINT ||
	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
	    state->format == PIPE_FORMAT_S8_UINT)
		view->is_stencil_sampler = true;

	if (tmp->is_depth && !r600_can_sample_zs(tmp, view->is_stencil_sampler)) {
		if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) {
			FREE(view);
			return NULL;
		}
		tmp = tmp->flushed_depth_texture;
	}

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	offset_level = state->u.tex.first_level;
	last_level = state->u.tex.last_level - offset_level;
	width = width_first_level;
	height = height_first_level;
	depth = u_minify(texture->depth0, offset_level);
	pitch = tmp->surface.u.legacy.level[offset_level].nblk_x * util_format_get_blockwidth(state->format);

	if (texture->target == PIPE_TEXTURE_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_2D_ARRAY) {
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
		depth = texture->array_size / 6;

	switch (tmp->surface.u.legacy.level[offset_level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_038000_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_038000_ARRAY_1D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		break;
	}

	view->tex_resource = &tmp->resource;
	view->tex_resource_words[0] = (S_038000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) |
				       S_038000_TILE_MODE(array_mode) |
				       S_038000_TILE_TYPE(tmp->non_disp_tiling) |
				       S_038000_PITCH((pitch / 8) - 1) |
				       S_038000_TEX_WIDTH(width - 1));
	view->tex_resource_words[1] = (S_038004_TEX_HEIGHT(height - 1) |
				       S_038004_TEX_DEPTH(depth - 1) |
				       S_038004_DATA_FORMAT(format));
	view->tex_resource_words[2] = tmp->surface.u.legacy.level[offset_level].offset >> 8;
	if (offset_level >= tmp->resource.b.b.last_level) {
		view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level].offset >> 8;
	} else {
		view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level + 1].offset >> 8;
	}
	view->tex_resource_words[4] = (word4 |
				       S_038010_REQUEST_SIZE(1) |
				       S_038010_ENDIAN_SWAP(endian) |
				       S_038010_BASE_LEVEL(0));
	view->tex_resource_words[5] = (S_038014_BASE_ARRAY(state->u.tex.first_layer) |
				       S_038014_LAST_ARRAY(state->u.tex.last_layer));
	if (texture->nr_samples > 1) {
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(util_logbase2(texture->nr_samples));
	} else {
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(last_level);
	}
	view->tex_resource_words[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE) |
				       S_038018_MAX_ANISO(4 /* max 16 samples */));
	return &view->base;
}
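/* Example of the MSAA encoding above: for a 4-sample texture, LAST_LEVEL
 * stores util_logbase2(4) = 2 rather than a mip count, since multisample
 * resources have no mipmaps and the field is reused for log2(nr_samples).
 */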
static struct pipe_sampler_view *
r600_create_sampler_view(struct pipe_context *ctx,
			 struct pipe_resource *tex,
			 const struct pipe_sampler_view *state)
{
	return r600_create_sampler_view_custom(ctx, tex, state,
					       u_minify(tex->width0, state->u.tex.first_level),
					       u_minify(tex->height0, state->u.tex.first_level));
}
static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}
static void r600_set_polygon_stipple(struct pipe_context *ctx,
				     const struct pipe_poly_stipple *state)
{
}
static void r600_init_color_surface(struct r600_context *rctx,
				    struct r600_surface *surf,
				    bool force_cmask_fmask)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	unsigned pitch, slice;
	unsigned color_info;
	unsigned color_view;
	unsigned format, swap, ntype, endian;
	unsigned offset;
	const struct util_format_description *desc;
	int i;
	bool blend_bypass = 0, blend_clamp = 1, do_endian_swap = FALSE;

	if (rtex->db_compatible && !r600_can_sample_zs(rtex, false)) {
		r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
		rtex = rtex->flushed_depth_texture;
		assert(rtex);
	}

	offset = rtex->surface.u.legacy.level[level].offset;
	color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
		     S_028080_SLICE_MAX(surf->base.u.tex.last_layer);

	pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	color_info = 0;
	switch (rtex->surface.u.legacy.level[level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED);
		break;
	case RADEON_SURF_MODE_1D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1);
		break;
	case RADEON_SURF_MODE_2D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		break;
	}

	desc = util_format_description(surf->base.format);

	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	ntype = V_0280A0_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_0280A0_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_UINT;
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !rtex->db_compatible;

	format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format,
					    do_endian_swap);
	assert(format != ~0);

	swap = r600_translate_colorswap(surf->base.format, do_endian_swap);
	assert(swap != ~0);

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	/* set blend bypass according to docs if SINT/UINT or
	   8/24 COLOR variants */
	if (ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT ||
	    format == V_0280A0_COLOR_8_24 || format == V_0280A0_COLOR_24_8 ||
	    format == V_0280A0_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	surf->alphatest_bypass = ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT;

	color_info |= S_0280A0_FORMAT(format) |
		S_0280A0_COMP_SWAP(swap) |
		S_0280A0_BLEND_BYPASS(blend_bypass) |
		S_0280A0_BLEND_CLAMP(blend_clamp) |
		S_0280A0_NUMBER_TYPE(ntype) |
		S_0280A0_ENDIAN(endian);

	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases
	 */
	if (rctx->b.chip_class == R600) {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - BLEND_CLAMP is enabled
		 * - BLEND_FLOAT32 is disabled
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    (desc->channel[i].size < 12 &&
		     desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		     ntype != V_0280A0_NUMBER_UINT &&
		     ntype != V_0280A0_NUMBER_SINT) &&
		    G_0280A0_BLEND_CLAMP(color_info) &&
		    !G_0280A0_BLEND_FLOAT32(color_info)) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	} else {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - 16-bit or smaller FLOAT
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    ((desc->channel[i].size < 12 &&
		      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		      ntype != V_0280A0_NUMBER_UINT && ntype != V_0280A0_NUMBER_SINT) ||
		     (desc->channel[i].size < 17 &&
		      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	}

	/* These might not always be initialized to zero. */
	surf->cb_color_base = offset >> 8;
	surf->cb_color_size = S_028060_PITCH_TILE_MAX(pitch) |
			      S_028060_SLICE_TILE_MAX(slice);
	surf->cb_color_fmask = surf->cb_color_base;
	surf->cb_color_cmask = surf->cb_color_base;
	surf->cb_color_mask = 0;

	r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource);
	r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource);

	if (rtex->cmask.size) {
		surf->cb_color_cmask = rtex->cmask.offset >> 8;
		surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max);

		if (rtex->fmask.size) {
			color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
			surf->cb_color_fmask = rtex->fmask.offset >> 8;
			surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max);
		} else { /* cmask only */
			color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE);
		}
	} else if (force_cmask_fmask) {
		/* Allocate dummy FMASK and CMASK if they aren't allocated already.
		 *
		 * R6xx needs FMASK and CMASK for the destination buffer of color resolve,
		 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated,
		 * because it's not an MSAA buffer.
		 */
		struct r600_cmask_info cmask;
		struct r600_fmask_info fmask;

		r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask);
		r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask);

		/* CMASK. */
		if (!rctx->dummy_cmask ||
		    rctx->dummy_cmask->b.b.width0 < cmask.size ||
		    rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) {
			struct pipe_transfer *transfer;
			void *ptr;

			r600_resource_reference(&rctx->dummy_cmask, NULL);
			rctx->dummy_cmask = (struct r600_resource*)
				r600_aligned_buffer_create(&rscreen->b.b, 0,
							   PIPE_USAGE_DEFAULT,
							   cmask.size, cmask.alignment);

			if (unlikely(!rctx->dummy_cmask)) {
				surf->color_initialized = false;
				return;
			}

			/* Set the contents to 0xCC. */
			ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
			memset(ptr, 0xCC, cmask.size);
			pipe_buffer_unmap(&rctx->b.b, transfer);
		}
		r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask);

		/* FMASK. */
		if (!rctx->dummy_fmask ||
		    rctx->dummy_fmask->b.b.width0 < fmask.size ||
		    rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
			r600_resource_reference(&rctx->dummy_fmask, NULL);
			rctx->dummy_fmask = (struct r600_resource*)
				r600_aligned_buffer_create(&rscreen->b.b, 0,
							   PIPE_USAGE_DEFAULT,
							   fmask.size, fmask.alignment);

			if (unlikely(!rctx->dummy_fmask)) {
				surf->color_initialized = false;
				return;
			}
		}
		r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask);

		/* Init the registers. */
		color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
		surf->cb_color_cmask = 0;
		surf->cb_color_fmask = 0;
		surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) |
				      S_028100_FMASK_TILE_MAX(fmask.slice_tile_max);
	}

	surf->cb_color_info = color_info;
	surf->cb_color_view = color_view;
	surf->color_initialized = true;
}
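/* Example of the EXPORT_NORM rule above: PIPE_FORMAT_R8G8B8A8_UNORM (8-bit
 * UNORM channels, below the 12-bit cutoff) qualifies on both the R600 and
 * R700+ paths, so surf->export_16bpc is set; a 32-bit float format fails
 * both size checks and never takes the compressed export path.
 */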
static void r600_init_depth_surface(struct r600_context *rctx,
				    struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level, pitch, slice, format, offset, array_mode;

	level = surf->base.u.tex.level;
	offset = rtex->surface.u.legacy.level[level].offset;
	pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	switch (rtex->surface.u.legacy.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	default:
		array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
		break;
	}

	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	surf->db_depth_info = S_028010_ARRAY_MODE(array_mode) | S_028010_FORMAT(format);
	surf->db_depth_base = offset >> 8;
	surf->db_depth_view = S_028004_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028004_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice);
	surf->db_prefetch_limit = (rtex->surface.u.legacy.level[level].nblk_y / 8) - 1;

	/* use htile only for first level */
	if (rtex->htile_buffer && !level) {
		surf->db_htile_data_base = 0;
		surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
					 S_028D24_HTILE_HEIGHT(1) |
					 S_028D24_FULL_CACHE(1);
		/* preload is not working properly on r6xx/r7xx */
		surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1);
	}

	surf->depth_initialized = true;
}
static void r600_set_framebuffer_state(struct pipe_context *ctx,
				       const struct pipe_framebuffer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_surface *surf;
	struct r600_texture *rtex;
	unsigned i;

	/* Flush TC when changing the framebuffer state, because the only
	 * client not using TC that can change textures is the framebuffer.
	 * Other places don't typically have to flush TC.
	 */
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_INV_TEX_CACHE;

	/* Set the new state. */
	util_copy_framebuffer_state(&rctx->framebuffer.state, state);

	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
					   util_format_is_pure_integer(state->cbufs[0]->format);
	rctx->framebuffer.compressed_cb_mask = 0;
	rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 &&
					    state->cbufs[0] && state->cbufs[1] &&
					    state->cbufs[0]->texture->nr_samples > 1 &&
					    state->cbufs[1]->texture->nr_samples <= 1;
	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);

	/* Colorbuffers. */
	for (i = 0; i < state->nr_cbufs; i++) {
		/* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
		bool force_cmask_fmask = rctx->b.chip_class == R600 &&
					 rctx->framebuffer.is_msaa_resolve &&
					 i == 1;

		surf = (struct r600_surface*)state->cbufs[i];
		if (!surf)
			continue;

		rtex = (struct r600_texture*)surf->base.texture;
		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);

		if (!surf->color_initialized || force_cmask_fmask) {
			r600_init_color_surface(rctx, surf, force_cmask_fmask);
			if (force_cmask_fmask) {
				/* re-initialize later without compression */
				surf->color_initialized = false;
			}
		}

		if (!surf->export_16bpc) {
			rctx->framebuffer.export_16bpc = false;
		}

		if (rtex->fmask.size) {
			rctx->framebuffer.compressed_cb_mask |= 1 << i;
		}
	}

	/* Update alpha-test state dependencies.
	 * Alpha-test is done on the first colorbuffer only. */
	if (state->nr_cbufs) {
		bool alphatest_bypass = false;

		surf = (struct r600_surface*)state->cbufs[0];
		if (surf) {
			alphatest_bypass = surf->alphatest_bypass;
		}

		if (rctx->alphatest_state.bypass != alphatest_bypass) {
			rctx->alphatest_state.bypass = alphatest_bypass;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
	}

	/* ZS buffer. */
	if (state->zsbuf) {
		surf = (struct r600_surface*)state->zsbuf;

		r600_context_add_resource_size(ctx, state->zsbuf->texture);

		if (!surf->depth_initialized) {
			r600_init_depth_surface(rctx, surf);
		}

		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
			rctx->poly_offset_state.zs_format = state->zsbuf->format;
			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
		}

		if (rctx->db_state.rsurf != surf) {
			rctx->db_state.rsurf = surf;
			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	} else if (rctx->db_state.rsurf) {
		rctx->db_state.rsurf = NULL;
		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
		rctx->alphatest_state.bypass = false;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}

	/* Calculate the CS size. */
	rctx->framebuffer.atom.num_dw =
		10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;

	if (rctx->framebuffer.state.nr_cbufs) {
		rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs;
		rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs);
	}
	if (rctx->framebuffer.state.zsbuf) {
		rctx->framebuffer.atom.num_dw += 16;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		rctx->framebuffer.atom.num_dw += 3;
	}
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) {
		rctx->framebuffer.atom.num_dw += 2;
	}

	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	r600_set_sample_locations_constant_buffer(rctx);
}
static uint32_t sample_locs_2x[] = {
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
};
static unsigned max_dist_2x = 4;

static uint32_t sample_locs_4x[] = {
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
};
static unsigned max_dist_4x = 6;
static uint32_t sample_locs_8x[] = {
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
};
static unsigned max_dist_8x = 7;
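/* How these tables are read (this matches the decode in
 * r600_get_sample_position() below): each FILL_SREG packs eight signed 4-bit
 * nibbles, one X/Y pair per sample, in 1/16-pixel units relative to the pixel
 * center.  For instance, the first 2x sample is (-4, 4), which decodes to
 * ((-4 + 8) / 16, (4 + 8) / 16) = (0.25, 0.75) in normalized pixel
 * coordinates.
 */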
static void r600_get_sample_position(struct pipe_context *ctx,
				     unsigned sample_count,
				     unsigned sample_index,
				     float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;
	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}
static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned max_dist = 0;

	if (rctx->b.family == CHIP_R600) {
		switch (nr_samples) {
		default:
			nr_samples = 0;
			break;
		case 2:
			radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
			max_dist = max_dist_2x;
			break;
		case 4:
			radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
			max_dist = max_dist_4x;
			break;
		case 8:
			radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
			radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
			max_dist = max_dist_8x;
			break;
		}
	} else {
		switch (nr_samples) {
		default:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			nr_samples = 0;
			break;
		case 2:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_2x;
			break;
		case 4:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_4x;
			break;
		case 8:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_8x;
			break;
		}
	}

	if (nr_samples > 1) {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
				S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
				S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
	} else {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
	}
}
static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
	unsigned nr_cbufs = state->nr_cbufs;
	struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
	unsigned i, sbu = 0;

	/* Colorbuffers. */
	radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
	for (i = 0; i < nr_cbufs; i++) {
		radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
	}
	/* set CB_COLOR1_INFO for possible dual-src blending */
	if (rctx->framebuffer.dual_src_blend && i == 1 && cb[0]) {
		radeon_emit(cs, cb[0]->cb_color_info);
		i++;
	}
	for (; i < 8; i++) {
		radeon_emit(cs, 0);
	}

	if (nr_cbufs) {
		for (i = 0; i < nr_cbufs; i++) {
			unsigned reloc;

			if (!cb[i])
				continue;

			/* COLOR_BASE */
			radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  (struct r600_resource*)cb[i]->base.texture,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* FMASK */
			radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  cb[i]->cb_buffer_fmask,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* CMASK */
			radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  cb[i]->cb_buffer_cmask,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);
		}

		radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
		}

		radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
		}

		radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
		}

		sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs);
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Zbuffer. */
	if (state->zsbuf) {
		struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
		unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
							   &rctx->b.gfx,
							   (struct r600_resource*)state->zsbuf->texture,
							   RADEON_USAGE_READWRITE,
							   surf->base.texture->nr_samples > 1 ?
								   RADEON_PRIO_DEPTH_BUFFER_MSAA :
								   RADEON_PRIO_DEPTH_BUFFER);

		radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
		radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
		radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
		radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
		radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
		radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);

		sbu |= SURFACE_BASE_UPDATE_DEPTH;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
		 * Older kernels are out of luck. */
		radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Framebuffer dimensions. */
	radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
	radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
			S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
	radeon_emit(cs, S_028244_BR_X(state->width) |
			S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */

	if (rctx->framebuffer.is_msaa_resolve) {
		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
	} else {
		/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
		 * will ensure that the alpha-test works even if there is
		 * no colorbuffer bound. */
		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
				       (1ull << MAX2(nr_cbufs, 1)) - 1);
	}

	r600_emit_msaa_state(rctx, rctx->framebuffer.nr_samples);
}
static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
{
	struct r600_context *rctx = (struct r600_context*)ctx;

	if (rctx->ps_iter_samples == min_samples)
		return;

	rctx->ps_iter_samples = min_samples;
	if (rctx->framebuffer.nr_samples > 1) {
		r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom);
		if (rctx->b.chip_class == R600)
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;

	if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		if (rctx->b.chip_class == R600) {
			radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
		} else {
			radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
		}
		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
	} else {
		unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
		unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
		unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;

		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
		/* Always enable the first color output to make sure alpha-test works even without one. */
		radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL,
				       a->cb_color_control |
				       S_028808_MULTIWRITE_ENABLE(multiwrite));
	}
}
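/* Mask arithmetic example for the else-branch above: with two bound
 * colorbuffers, fb_colormask = (1 << 8) - 1 = 0xff, i.e. four write-enable
 * bits per MRT.  CB_TARGET_MASK then exposes only the blend-state colormask
 * bits for those MRTs, and with MULTIWRITE enabled a single PS color output
 * fans out to all of them, which is why the shader mask switches from
 * ps_colormask to fb_colormask in that case.
 */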
static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_state *a = (struct r600_db_state*)atom;

	if (a->rsurf && a->rsurf->db_htile_surface) {
		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
		unsigned reloc_idx;

		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
						      RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc_idx);
	} else {
		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
	}
}
static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
	unsigned db_render_control = 0;
	unsigned db_render_override =
		S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
		S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);

	if (rctx->b.chip_class >= R700) {
		switch (a->ps_conservative_z) {
		default: /* fall through */
		case TGSI_FS_DEPTH_LAYOUT_ANY:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_ANY_Z);
			break;
		case TGSI_FS_DEPTH_LAYOUT_GREATER:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_GREATER_THAN_Z);
			break;
		case TGSI_FS_DEPTH_LAYOUT_LESS:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_LESS_THAN_Z);
			break;
		}
	}

	if (rctx->b.num_occlusion_queries > 0 &&
	    !a->occlusion_queries_disabled) {
		if (rctx->b.chip_class >= R700) {
			db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
		}
		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
	} else {
		db_render_control |= S_028D0C_ZPASS_INCREMENT_DISABLE(1);
	}

	if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) {
		/* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF);
		/* This is to fix a lockup when hyperz and alpha test are enabled at
		 * the same time; somehow the GPU gets confused about which order to
		 * pick for the Z test.
		 */
		if (rctx->alphatest_state.sx_alpha_test_control) {
			db_render_override |= S_028D10_FORCE_SHADER_Z_ORDER(1);
		}
	} else {
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	}
	if (rctx->b.chip_class == R600 && rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) {
		/* sample shading and hyperz causes lockups on R6xx chips */
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	}
	if (a->flush_depthstencil_through_cb) {
		assert(a->copy_depth || a->copy_stencil);

		db_render_control |= S_028D0C_DEPTH_COPY_ENABLE(a->copy_depth) |
				     S_028D0C_STENCIL_COPY_ENABLE(a->copy_stencil) |
				     S_028D0C_COPY_CENTROID(1) |
				     S_028D0C_COPY_SAMPLE(a->copy_sample);

		if (rctx->b.chip_class == R600)
			db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);

		if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
		    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
			db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	} else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
		db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
				     S_028D0C_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
	}
	if (a->htile_clear) {
		db_render_control |= S_028D0C_DEPTH_CLEAR_ENABLE(1);
	}

	/* RV770 workaround for a hang with 8x MSAA. */
	if (rctx->b.family == CHIP_RV770 && a->log_samples == 3) {
		db_render_override |= S_028D10_MAX_TILES_IN_DTT(6);
	}

	radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
	radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
	radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
	radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
}
static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_config_state *a = (struct r600_config_state *)atom;

	radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
	radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
}
static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;

	while (dirty_mask) {
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;
		unsigned buffer_index = u_bit_scan(&dirty_mask);

		vb = &rctx->vertex_buffer_state.vb[buffer_index];
		rbuffer = (struct r600_resource *)vb->buffer;

		offset = vb->buffer_offset;

		/* fetch resources start at index 320 (OFFSET_FS) */
		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7);
		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
			    S_038008_STRIDE(vb->stride));
		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
	}
}
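/* Layout of the 7-dword SET_RESOURCE slot written above: WORD0 is the byte
 * offset into the buffer, WORD1 the last addressable byte, WORD2 packs the
 * vertex stride and the endian swap, and the 0xc0000000 written to WORD6
 * presumably sets the type bits that mark the slot as a valid buffer
 * resource. */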
static void r600_emit_constant_buffers(struct r600_context *rctx,
				       struct r600_constbuf_state *state,
				       unsigned buffer_id_base,
				       unsigned reg_alu_constbuf_size,
				       unsigned reg_alu_const_cache)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct pipe_constant_buffer *cb;
		struct r600_resource *rbuffer;
		unsigned offset;
		unsigned buffer_index = ffs(dirty_mask) - 1;
		unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);

		cb = &state->cb[buffer_index];
		rbuffer = (struct r600_resource *)cb->buffer;

		offset = cb->buffer_offset;

		if (!gs_ring_buffer) {
			radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
					       DIV_ROUND_UP(cb->buffer_size, 256));
			radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
		}

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (buffer_id_base + buffer_index) * 7);
		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
			    S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));

		dirty_mask &= ~(1 << buffer_index);
	}
	state->dirty_mask = 0;
}
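/* Constant buffers end up bound twice: through ALU_CONST_BUFFER_SIZE /
 * ALU_CONST_CACHE for the ALU constant-cache path, and as an SQ fetch
 * resource so shaders can also read them with fetch instructions
 * (presumably for relative indexing). The GS ring buffer skips the ALU
 * registers and uses a 4-byte stride instead of 16. */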
1735 static void r600_emit_vs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
1737 r600_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_VERTEX
],
1738 R600_FETCH_CONSTANTS_OFFSET_VS
,
1739 R_028180_ALU_CONST_BUFFER_SIZE_VS_0
,
1740 R_028980_ALU_CONST_CACHE_VS_0
);
1743 static void r600_emit_gs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
1745 r600_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_GEOMETRY
],
1746 R600_FETCH_CONSTANTS_OFFSET_GS
,
1747 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0
,
1748 R_0289C0_ALU_CONST_CACHE_GS_0
);
1751 static void r600_emit_ps_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
1753 r600_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_FRAGMENT
],
1754 R600_FETCH_CONSTANTS_OFFSET_PS
,
1755 R_028140_ALU_CONST_BUFFER_SIZE_PS_0
,
1756 R_028940_ALU_CONST_CACHE_PS_0
);
static void r600_emit_sampler_views(struct r600_context *rctx,
				    struct r600_samplerview_state *state,
				    unsigned resource_id_base)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_view *rview;
		unsigned resource_index = u_bit_scan(&dirty_mask);
		unsigned reloc;

		rview = state->views[resource_index];
		assert(rview);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (resource_id_base + resource_index) * 7);
		radeon_emit_array(cs, rview->tex_resource_words, 7);

		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
						  RADEON_USAGE_READ,
						  r600_get_sampler_view_priority(rview->tex_resource));
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);
	}
	state->dirty_mask = 0;
}
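/* The same relocation index is emitted twice because the texture resource
 * words reference two buffer addresses (the base and the mip-chain base),
 * and the CS checker expects one NOP+reloc pair per address. This is an
 * assumption based on the usual r600 texture resource layout. */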
static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
				R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS);
}

static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views,
				R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS);
}

static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views,
				R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS);
}
static void r600_emit_sampler_states(struct r600_context *rctx,
				     struct r600_textures_info *texinfo,
				     unsigned resource_id_base,
				     unsigned border_color_reg)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = texinfo->states.dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_state *rstate;
		struct r600_pipe_sampler_view *rview;
		unsigned i = u_bit_scan(&dirty_mask);

		rstate = texinfo->states.states[i];
		assert(rstate);
		rview = texinfo->views.views[i];

		/* TEX_ARRAY_OVERRIDE must be set for array textures to disable
		 * filtering between layers.
		 * Don't update TEX_ARRAY_OVERRIDE if we don't have the sampler view.
		 */
		if (rview) {
			enum pipe_texture_target target = rview->base.texture->target;
			if (target == PIPE_TEXTURE_1D_ARRAY ||
			    target == PIPE_TEXTURE_2D_ARRAY) {
				rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1);
				texinfo->is_array_sampler[i] = true;
			} else {
				rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE;
				texinfo->is_array_sampler[i] = false;
			}
		}

		radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0));
		radeon_emit(cs, (resource_id_base + i) * 3);
		radeon_emit_array(cs, rstate->tex_sampler_words, 3);

		if (rstate->border_color_use) {
			unsigned offset;

			offset = border_color_reg;
			offset += i * 16;
			radeon_set_config_reg_seq(cs, offset, 4);
			radeon_emit_array(cs, rstate->border_color.ui, 4);
		}
	}
	texinfo->states.dirty_mask = 0;
}
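/* Border colors live in a separate config-register block: four dwords
 * (RGBA) per sampler, hence the offset of i * 16 bytes from the per-stage
 * TD_*_SAMPLER0_BORDER_RED base register. */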
static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED);
}

static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED);
}

static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED);
}
static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned tmp;

	tmp = S_009508_DISABLE_CUBE_ANISO(1) |
	      S_009508_SYNC_GRADIENT(1) |
	      S_009508_SYNC_WALKER(1) |
	      S_009508_SYNC_ALIGNER(1);
	if (!rctx->seamless_cube_map.enabled) {
		tmp |= S_009508_DISABLE_CUBE_WRAP(1);
	}
	radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
}
static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask *)a;
	uint8_t mask = s->sample_mask;

	radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK,
			       mask | (mask << 8) | (mask << 16) | (mask << 24));
}
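/* The 8-bit sample mask is replicated into all four bytes of
 * PA_SC_AA_MASK; e.g. a mask of 0x3 is written as 0x03030303. */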
static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_cso_state *state = (struct r600_cso_state *)a;
	struct r600_fetch_shader *shader = (struct r600_fetch_shader *)state->cso;

	radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SHADER_BINARY));
}
static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_shader_stages_state *state = (struct r600_shader_stages_state *)a;

	uint32_t v2 = 0, primid = 0;

	if (rctx->vs_shader->current->shader.vs_as_gs_a) {
		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		primid = 1;
	}

	if (state->geom_enable) {
		uint32_t cut_val;

		if (rctx->gs_shader->gs_max_out_vertices <= 128)
			cut_val = V_028A40_GS_CUT_128;
		else if (rctx->gs_shader->gs_max_out_vertices <= 256)
			cut_val = V_028A40_GS_CUT_256;
		else if (rctx->gs_shader->gs_max_out_vertices <= 512)
			cut_val = V_028A40_GS_CUT_512;
		else
			cut_val = V_028A40_GS_CUT_1024;

		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		     S_028A40_CUT_MODE(cut_val);

		if (rctx->gs_shader->current->shader.gs_prim_id_input)
			primid = 1;
	}

	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
}
static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_gs_rings_state *state = (struct r600_gs_rings_state *)a;
	struct r600_resource *rbuffer;

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));

	if (state->enable) {
		rbuffer = (struct r600_resource *)state->esgs_ring.buffer;
		radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_SHADER_RINGS));
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
				      state->esgs_ring.buffer_size >> 8);

		rbuffer = (struct r600_resource *)state->gsvs_ring.buffer;
		radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_SHADER_RINGS));
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
				      state->gsvs_ring.buffer_size >> 8);
	} else {
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
	}

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}
/* Adjust GPR allocation on R6xx/R7xx */
bool r600_adjust_gprs(struct r600_context *rctx)
{
	unsigned num_gprs[R600_NUM_HW_STAGES];
	unsigned new_gprs[R600_NUM_HW_STAGES];
	unsigned cur_gprs[R600_NUM_HW_STAGES];
	unsigned def_gprs[R600_NUM_HW_STAGES];
	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
	unsigned max_gprs;
	unsigned tmp, tmp2;
	unsigned i;
	bool need_recalc = false, use_default = true;

	/* hardware will reserve twice num_clause_temp_gprs */
	max_gprs = def_num_clause_temp_gprs * 2;
	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
		def_gprs[i] = rctx->default_gprs[i];
		max_gprs += def_gprs[i];
	}

	cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);

	num_gprs[R600_HW_STAGE_PS] = rctx->ps_shader->current->shader.bc.ngpr;
	if (rctx->gs_shader) {
		num_gprs[R600_HW_STAGE_ES] = rctx->vs_shader->current->shader.bc.ngpr;
		num_gprs[R600_HW_STAGE_GS] = rctx->gs_shader->current->shader.bc.ngpr;
		num_gprs[R600_HW_STAGE_VS] = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
	} else {
		num_gprs[R600_HW_STAGE_ES] = 0;
		num_gprs[R600_HW_STAGE_GS] = 0;
		num_gprs[R600_HW_STAGE_VS] = rctx->vs_shader->current->shader.bc.ngpr;
	}

	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
		new_gprs[i] = num_gprs[i];
		if (new_gprs[i] > cur_gprs[i])
			need_recalc = true;
		if (new_gprs[i] > def_gprs[i])
			use_default = false;
	}

	/* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must be <= max_gprs */
	if (!need_recalc)
		return true;

	/* try to switch back to the defaults */
	if (!use_default) {
		/* always privilege the vs stage, so that at worst it's the
		 * pixel stage producing wrong output (not the vertex
		 * stage) */
		new_gprs[R600_HW_STAGE_PS] = max_gprs - def_num_clause_temp_gprs * 2;
		for (i = R600_HW_STAGE_VS; i < R600_NUM_HW_STAGES; i++)
			new_gprs[R600_HW_STAGE_PS] -= new_gprs[i];
	} else {
		for (i = 0; i < R600_NUM_HW_STAGES; i++)
			new_gprs[i] = def_gprs[i];
	}

	/* SQ_PGM_RESOURCES_*.NUM_GPRS must always be programmed to a value <=
	 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS, otherwise the GPU will lock up.
	 * Also, if a shader uses more GPRs than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS,
	 * it will lock up. So in this case just discard the draw command
	 * and don't change the current GPR repartition.
	 */
	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
		if (num_gprs[i] > new_gprs[i]) {
			R600_ERR("shaders require too many registers (%d + %d + %d + %d) "
				 "for a combined maximum of %d\n",
				 num_gprs[R600_HW_STAGE_PS], num_gprs[R600_HW_STAGE_VS],
				 num_gprs[R600_HW_STAGE_ES], num_gprs[R600_HW_STAGE_GS],
				 max_gprs);
			return false;
		}
	}

	/* in some cases we end up recomputing the current value */
	tmp = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
	      S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
	      S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);

	tmp2 = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
	       S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);
	if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
		rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
		rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
	}
	return true;
}
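/* Worked example with made-up numbers (not any real chip's defaults):
 * with def_num_clause_temp_gprs = 4 and def_gprs = {PS 100, VS 20, GS 0,
 * ES 0}, max_gprs = 100 + 20 + 0 + 0 + 2*4 = 128. If the PS needs 90 GPRs
 * and the VS 30, the defaults no longer fit (30 > 20), so VS/GS/ES keep
 * their requested counts and the PS gets the remainder:
 * 128 - 2*4 - 30 - 0 - 0 = 90, which still satisfies num_gprs <= new_gprs,
 * so the draw is allowed to proceed. */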
void r600_init_atom_start_cs(struct r600_context *rctx)
{
	int ps_prio;
	int vs_prio;
	int gs_prio;
	int es_prio;
	int num_ps_gprs;
	int num_vs_gprs;
	int num_gs_gprs;
	int num_es_gprs;
	int num_temp_gprs;
	int num_ps_threads;
	int num_vs_threads;
	int num_gs_threads;
	int num_es_threads;
	int num_ps_stack_entries;
	int num_vs_stack_entries;
	int num_gs_stack_entries;
	int num_es_stack_entries;
	enum radeon_family family;
	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
	uint32_t tmp;
	int i;

	r600_init_command_buffer(cb, 256);

	/* R6xx requires this packet at the start of each command buffer */
	if (rctx->b.chip_class == R600) {
		r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0));
		r600_store_value(cb, 0);
	}
	/* All asics require this one */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	/* This enables pipeline stat & streamout queries.
	 * They are only disabled by blits.
	 */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));

	family = rctx->b.family;
	ps_prio = 0;
	vs_prio = 1;
	gs_prio = 2;
	es_prio = 3;
	switch (family) {
	case CHIP_R600:
		/* ... */
		num_ps_threads = 136;
		num_vs_threads = 48;
		/* ... */
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		/* ... */
		num_ps_threads = 144;
		num_vs_threads = 40;
		/* ... */
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		/* ... */
		/* use limits 40 VS and at least 16 ES/GS */
		num_ps_threads = 120;
		num_vs_threads = 40;
		num_gs_threads = 16;
		num_es_threads = 16;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		/* ... */
		num_ps_threads = 136;
		num_vs_threads = 48;
		/* ... */
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		/* ... */
		num_ps_threads = 180;
		num_vs_threads = 60;
		/* ... */
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 128;
		num_es_stack_entries = 128;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		/* ... */
		num_ps_threads = 180;
		num_vs_threads = 60;
		/* ... */
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		/* ... */
		num_ps_threads = 136;
		num_vs_threads = 48;
		/* ... */
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	rctx->default_gprs[R600_HW_STAGE_PS] = num_ps_gprs;
	rctx->default_gprs[R600_HW_STAGE_VS] = num_vs_gprs;
	rctx->default_gprs[R600_HW_STAGE_GS] = 0;
	rctx->default_gprs[R600_HW_STAGE_ES] = 0;

	rctx->r6xx_num_clause_temp_gprs = num_temp_gprs;

	/* SQ_CONFIG */
	tmp = 0;
	switch (family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		/* no vertex cache */
		break;
	default:
		tmp |= S_008C00_VC_ENABLE(1);
		break;
	}
	tmp |= S_008C00_DX9_CONSTS(0);
	tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1);
	tmp |= S_008C00_PS_PRIO(ps_prio);
	tmp |= S_008C00_VS_PRIO(vs_prio);
	tmp |= S_008C00_GS_PRIO(gs_prio);
	tmp |= S_008C00_ES_PRIO(es_prio);
	r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp);

	/* SQ_GPR_RESOURCE_MGMT_2 */
	tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs);
	tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs);
	r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4);
	r600_store_value(cb, tmp);

	/* SQ_THREAD_RESOURCE_MGMT */
	tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads);
	tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads);
	tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads);
	tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads);
	r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */

	/* SQ_STACK_RESOURCE_MGMT_1 */
	tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
	tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
	r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */

	/* SQ_STACK_RESOURCE_MGMT_2 */
	tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
	tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
	r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */

	r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028A50_VGT_ENHANCE, 4);
		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000);
		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0);
		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204);
		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	} else {
		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000);
		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204);
		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1);
	}
	r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9);
	r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */

	/* to avoid the GPU preloading constants from a random address */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */

	r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0);
	r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0);
	r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0);

	r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2);
	r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */
	r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */

	r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0);

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3);
	r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */
	r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */
	r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */

	r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */

	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
	r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0);

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
	}

	r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4);
	r600_store_value(cb, 0x1000000);  /* R_028C30_CB_CLRCMP_CONTROL */
	r600_store_value(cb, 0);          /* R_028C34_CB_CLRCMP_SRC */
	r600_store_value(cb, 0xFF);       /* R_028C38_CB_CLRCMP_DST */
	r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5);
	r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */
	r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */
	r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */
	r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */
	r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */

	r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0);

	if (rctx->b.chip_class == R700)
		r600_store_context_reg(cb, R_028350_SX_MISC, 0);
	if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout)
		r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf));

	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF);
	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF);
	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF);
}
void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control;
	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
	unsigned tmp, sid, ufi = 0;
	int need_linear = 0;
	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;

	if (!cb->buf) {
		r600_init_command_buffer(cb, 64);
	} else {
		cb->num_dw = 0;
	}

	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput);
	for (i = 0; i < rshader->ninput; i++) {
		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
			pos_index = i;
		if (rshader->input[i].name == TGSI_SEMANTIC_FACE && face_index == -1)
			face_index = i;
		if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID)
			fixed_pt_position_index = i;

		sid = rshader->input[i].spi_sid;

		tmp = S_028644_SEMANTIC(sid);

		/* D3D 9 behaviour. GL is undefined */
		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
			tmp |= S_028644_DEFAULT_VAL(3);

		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->rasterizer && rctx->rasterizer->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
		    sprite_coord_enable & (1 << rshader->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID)
			tmp |= S_028644_SEL_CENTROID(1);

		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE)
			tmp |= S_028644_SEL_SAMPLE(1);

		if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) {
			need_linear = 1;
			tmp |= S_028644_SEL_LINEAR(1);
		}

		r600_store_value(cb, tmp);
	}

	db_shader_control = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			z_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
			stencil_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
		    rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
			mask_export = 1;
	}
	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
	db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export);
	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);
	if (rshader->uses_kill)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
			exports_ps |= 1;
		}
	}
	num_cout = rshader->nr_ps_color_exports;
	exports_ps |= S_028854_EXPORT_COLORS(num_cout);
	if (!exports_ps) {
		/* always export at least 1 component per pixel */
		exports_ps = 2;
	}

	shader->nr_ps_color_outputs = num_cout;

	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) |
			      S_0286CC_PERSP_GRADIENT_ENA(1) |
			      S_0286CC_LINEAR_GRADIENT_ENA(need_linear);
	spi_input_z = 0;
	if (pos_index != -1) {
		spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) |
					S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
					S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) |
					S_0286CC_BARYC_SAMPLE_CNTL(1)) |
					S_0286CC_POSITION_SAMPLE(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE);
		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
	}

	spi_ps_in_control_1 = 0;
	if (face_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
				       S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
	}
	if (fixed_pt_position_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
				       S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
	}

	/* HW bug in original R600 */
	if (rctx->b.family == CHIP_R600)
		ufi = 1;

	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */

	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);

	r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2);
	r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS */
			 S_028850_NUM_GPRS(rshader->bc.ngpr) |
			 S_028850_STACK_SIZE(rshader->bc.nstack) |
			 S_028850_UNCACHED_FIRST_INST(ufi));
	r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */

	r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	/* only set some bits here, the other bits are set in the dsa state */
	shader->db_shader_control = db_shader_control;
	shader->ps_depth_export = z_export | stencil_export | mask_export;

	shader->sprite_coord_enable = sprite_coord_enable;
	if (rctx->rasterizer)
		shader->flatshade = rctx->rasterizer->flatshade;
}
void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned spi_vs_out_id[10] = {};
	unsigned i, tmp, nparams = 0;

	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].spi_sid) {
			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
			spi_vs_out_id[nparams / 4] |= tmp;
			nparams++;
		}
	}

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10);
	for (i = 0; i < 10; i++) {
		r600_store_value(cb, spi_vs_out_id[i]);
	}

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	if (nparams < 1)
		nparams = 1;

	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
	r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS,
			       S_028868_NUM_GPRS(rshader->bc.ngpr) |
			       S_028868_STACK_SIZE(rshader->bc.nstack));
	if (rshader->vs_position_window_space) {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	} else {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_W0_FMT(1) |
				       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
				       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
				       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	}

	r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->pa_cl_vs_out_cntl =
		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) |
		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) |
		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) |
		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport);
}
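/* SPI_VS_OUT_ID_* packing: each 32-bit register holds four 8-bit semantic
 * ids, so param n lands in byte (n & 3) of register n / 4. For example, a
 * fifth exported param (n = 4) with spi_sid 7 sets byte 0 of
 * SPI_VS_OUT_ID_1 to 7. */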
#define RV610_GSVS_ALIGN 32
#define R600_GSVS_ALIGN 16

void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
	unsigned gsvs_itemsize =
			(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2;

	/* some r600s need the GSVS itemsize aligned to the cacheline size;
	 * this was fixed in rs780 and above. */
	switch (rctx->b.family) {
	case CHIP_RV610:
		gsvs_itemsize = align(gsvs_itemsize, RV610_GSVS_ALIGN);
		break;
	case CHIP_R600:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
		gsvs_itemsize = align(gsvs_itemsize, R600_GSVS_ALIGN);
		break;
	default:
		break;
	}

	r600_init_command_buffer(cb, 64);

	/* VGT_GS_MODE is written by r600_emit_shader_stages */
	r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
				       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
	}
	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));

	r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE,
			       cp_shader->ring_item_sizes[0] >> 2);

	r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
			       (rshader->ring_item_sizes[0]) >> 2);

	r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
			       gsvs_itemsize);

	/* FIXME calculate these values somehow ??? */
	r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2);
	r600_store_value(cb, 0x80); /* GS_PER_ES */
	r600_store_value(cb, 0x100); /* ES_PER_GS */
	r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1);
	r600_store_value(cb, 0x2); /* GS_PER_VS */

	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS,
			       S_02887C_NUM_GPRS(rshader->bc.ngpr) |
			       S_02887C_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}
void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
			       S_028890_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}
void *r600_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned i;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	for (i = 0; i < 2; i++) {
		blend.rt[i].colormask = 0xf;
		blend.rt[i].blend_enable = 1;
		blend.rt[i].rgb_func = PIPE_BLEND_ADD;
		blend.rt[i].alpha_func = PIPE_BLEND_ADD;
		blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
	}
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}

void *r700_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}

void *r600_create_decompress_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES);
}
void *r600_create_db_flush_dsa(struct r600_context *rctx)
{
	struct pipe_depth_stencil_alpha_state dsa;
	boolean quirk = false;

	if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
		quirk = true;

	memset(&dsa, 0, sizeof(dsa));

	if (quirk) {
		dsa.depth.enabled = 1;
		dsa.depth.func = PIPE_FUNC_LEQUAL;
		dsa.stencil[0].enabled = 1;
		dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
		dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP;
		dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR;
		dsa.stencil[0].writemask = 0xff;
	}

	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
}
void r600_update_db_shader_control(struct r600_context * rctx)
{
	bool dual_export;
	unsigned db_shader_control;
	uint8_t ps_conservative_z;

	if (!rctx->ps_shader) {
		return;
	}

	dual_export = rctx->framebuffer.export_16bpc &&
		      !rctx->ps_shader->current->ps_depth_export;

	db_shader_control = rctx->ps_shader->current->db_shader_control |
			    S_02880C_DUAL_EXPORT_ENABLE(dual_export);

	ps_conservative_z = rctx->ps_shader->current->shader.ps_conservative_z;

	/* When alpha test is enabled we can't trust the hw to make the proper
	 * decision on the order in which ztest should be run relative to fragment
	 * shader execution.
	 *
	 * If alpha test is enabled perform the z test after the fragment shader.
	 * RE_Z (early z test but no write to the zbuffer) seems to cause a
	 * lockup on r6xx/r7xx.
	 */
	if (rctx->alphatest_state.sx_alpha_test_control) {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
	} else {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}

	if (db_shader_control != rctx->db_misc_state.db_shader_control ||
	    ps_conservative_z != rctx->db_misc_state.ps_conservative_z) {
		rctx->db_misc_state.db_shader_control = db_shader_control;
		rctx->db_misc_state.ps_conservative_z = ps_conservative_z;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
static inline unsigned r600_array_mode(unsigned mode)
{
	switch (mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		return V_0280A0_ARRAY_LINEAR_ALIGNED;
	case RADEON_SURF_MODE_1D:
		return V_0280A0_ARRAY_1D_TILED_THIN1;
	case RADEON_SURF_MODE_2D:
		return V_0280A0_ARRAY_2D_TILED_THIN1;
	}
}
static boolean r600_dma_copy_tile(struct r600_context *rctx,
				  struct pipe_resource *dst,
				  unsigned dst_level,
				  unsigned dst_x,
				  unsigned dst_y,
				  unsigned dst_z,
				  struct pipe_resource *src,
				  unsigned src_level,
				  unsigned src_x,
				  unsigned src_y,
				  unsigned src_z,
				  unsigned copy_height,
				  unsigned pitch,
				  unsigned bpp)
{
	struct radeon_winsys_cs *cs = rctx->b.dma.cs;
	struct r600_texture *rsrc = (struct r600_texture *)src;
	struct r600_texture *rdst = (struct r600_texture *)dst;
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode;
	uint64_t base, addr;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
	assert(dst_mode != src_mode);

	y = 0;
	lbpp = util_logbase2(bpp);
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
		/* T2L */
		array_mode = r600_array_mode(src_mode);
		slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* linear height must be the same as the slice tile max height; it's ok even
		 * if the linear destination/source have a smaller height, as the size of the
		 * dma packet will be using copy_height, which is always smaller than or equal
		 * to the linear height
		 */
		height = u_minify(rsrc->resource.b.b.height0, src_level);
		detile = 1;
		x = src_x;
		y = src_y;
		z = src_z;
		base = rsrc->surface.u.legacy.level[src_level].offset;
		addr = rdst->surface.u.legacy.level[dst_level].offset;
		addr += rdst->surface.u.legacy.level[dst_level].slice_size * dst_z;
		addr += dst_y * pitch + dst_x * bpp;
	} else {
		/* L2T */
		array_mode = r600_array_mode(dst_mode);
		slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* linear height must be the same as the slice tile max height; it's ok even
		 * if the linear destination/source have a smaller height, as the size of the
		 * dma packet will be using copy_height, which is always smaller than or equal
		 * to the linear height
		 */
		height = u_minify(rdst->resource.b.b.height0, dst_level);
		detile = 0;
		x = dst_x;
		y = dst_y;
		z = dst_z;
		base = rdst->surface.u.legacy.level[dst_level].offset;
		addr = rsrc->surface.u.legacy.level[src_level].offset;
		addr += rsrc->surface.u.legacy.level[src_level].slice_size * src_z;
		addr += src_y * pitch + src_x * bpp;
	}
	/* check that we are within the dw/base alignment constraints */
	if (addr % 4 || base % 256) {
		return FALSE;
	}

	/* It's an r6xx/r7xx limitation: the number of lines in the blit must be a
	 * multiple of 8. Compute the maximum number of 8-line blocks we can copy
	 * within the size limit.
	 */
	cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8;
	ncopy = (copy_height / cheight) + !!(copy_height % cheight);
	r600_need_dma_space(&rctx->b, ncopy * 7, &rdst->resource, &rsrc->resource);

	for (i = 0; i < ncopy; i++) {
		cheight = cheight > copy_height ? copy_height : cheight;
		size = (cheight * pitch) / 4;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ,
					  RADEON_PRIO_SDMA_TEXTURE);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE,
					  RADEON_PRIO_SDMA_TEXTURE);
		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size));
		radeon_emit(cs, base >> 8);
		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
				(lbpp << 24) | ((height - 1) << 10) |
				pitch_tile_max);
		radeon_emit(cs, (slice_tile_max << 12) | (z << 0));
		radeon_emit(cs, (x << 3) | (y << 17));
		radeon_emit(cs, addr & 0xfffffffc);
		radeon_emit(cs, (addr >> 32UL) & 0xff);
		copy_height -= cheight;
		addr += cheight * pitch;
		y += cheight;
	}
	return TRUE;
}
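/* Band-height arithmetic above: one DMA packet moves at most
 * R600_DMA_COPY_MAX_SIZE_DW dwords and the engine wants the line count to
 * be a multiple of 8, so for a pitch of p bytes each band is
 * (R600_DMA_COPY_MAX_SIZE_DW * 4 / p) & ~7 lines tall, and ncopy such
 * bands cover copy_height. */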
static void r600_dma_copy(struct pipe_context *ctx,
			  struct pipe_resource *dst,
			  unsigned dst_level,
			  unsigned dstx, unsigned dsty, unsigned dstz,
			  struct pipe_resource *src,
			  unsigned src_level,
			  const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture *)src;
	struct r600_texture *rdst = (struct r600_texture *)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (rctx->b.dma.cs == NULL) {
		goto fallback;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		if (dst_x % 4 || src_box->x % 4 || src_box->width % 4)
			goto fallback;

		r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	if (src_box->depth > 1 ||
	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
	copy_height = src_box->height / rsrc->surface.blk_h;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
		/* strict requirement on r6xx/r7xx */
		goto fallback;
	}
	/* there are lots of constraints on alignment; this should capture them all */
	if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset, size;
		/* a simple dma blit will do; NOTE: the code here assumes:
		 *   src_box.x/y == 0
		 *   dst_x/y == 0
		 *   dst_pitch == src_pitch
		 */
		src_offset = rsrc->surface.u.legacy.level[src_level].offset;
		src_offset += rsrc->surface.u.legacy.level[src_level].slice_size * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
		dst_offset += rdst->surface.u.legacy.level[dst_level].slice_size * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		size = src_box->height * src_pitch;
		/* must be dw aligned */
		if (dst_offset % 4 || src_offset % 4 || size % 4) {
			goto fallback;
		}
		r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size);
	} else {
		if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
					src, src_level, src_x, src_y, src_box->z,
					copy_height, dst_pitch, bpp)) {
			goto fallback;
		}
	}
	return;

fallback:
	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box);
}
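/* Summary of the paths above: buffer-to-buffer copies go straight to the
 * DMA engine, images with matching tiling modes degenerate to a linear
 * buffer copy, mismatched modes take the tiled<->linear path in
 * r600_dma_copy_tile(), and anything the DMA engine cannot express falls
 * back to the gfx blitter via r600_resource_copy_region(). */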
void r600_init_state_functions(struct r600_context *rctx)
{
	unsigned id = 1;
	unsigned i;

	/* !!!
	 *  To avoid GPU lockups, registers must be emitted in a specific order
	 * (no kidding ...). The order below is important and has been
	 * partially inferred from analyzing fglrx command streams.
	 *
	 * Don't reorder atoms without carefully checking the effect (GPU lockup
	 * or piglit regression).
	 * !!!
	 */

	r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0);

	/* shader const */
	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0);
	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);

	/* sampler states must be emitted before TA_CNTL_AUX, otherwise a DISABLE_CUBE_WRAP change
	 * does not take effect (TA_CNTL_AUX is emitted by r600_emit_seamless_cube_map)
	 */
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0);
	/* resource */
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
	r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);

	r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);

	r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
	r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
	rctx->sample_mask.sample_mask = ~0;

	r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
	r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
	r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
	r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7);
	r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6);
	r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26);
	r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
	r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
	r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
	r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 9);
	r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
	r600_add_atom(rctx, &rctx->b.scissors.atom, id++);
	r600_add_atom(rctx, &rctx->b.viewports.atom, id++);
	r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
	r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
	r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
	r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
	r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
	r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
	for (i = 0; i < R600_NUM_HW_STAGES; i++)
		r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
	r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
	r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);

	rctx->b.b.create_blend_state = r600_create_blend_state;
	rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state;
	rctx->b.b.create_rasterizer_state = r600_create_rs_state;
	rctx->b.b.create_sampler_state = r600_create_sampler_state;
	rctx->b.b.create_sampler_view = r600_create_sampler_view;
	rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state;
	rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple;
	rctx->b.b.set_min_samples = r600_set_min_samples;
	rctx->b.b.get_sample_position = r600_get_sample_position;
	rctx->b.dma_copy = r600_dma_copy;
	/* this function must be last */