/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"

static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028804_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028804_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028804_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028804_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028804_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}

static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028804_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028804_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028804_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028804_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028804_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028804_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028804_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028804_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028804_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028804_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028804_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028804_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028804_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028804_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028804_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028804_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028804_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}

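/* Descriptive note (not in the original file): r600_tex_dim() maps a gallium
 * texture target to the SQ_TEX_DIM field, selecting the MSAA variants of the
 * 2D targets when nr_samples > 1. */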
static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples)
{
	switch (dim) {
	default:
	case PIPE_TEXTURE_1D:
		return V_038000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_038000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA :
					V_038000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_038000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_038000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_038000_SQ_TEX_DIM_CUBEMAP;
	}
}

static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028010_DEPTH_16;
	case PIPE_FORMAT_Z24X8_UNORM:
		return V_028010_DEPTH_X8_24;
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		return V_028010_DEPTH_8_24;
	case PIPE_FORMAT_Z32_FLOAT:
		return V_028010_DEPTH_32_FLOAT;
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028010_DEPTH_X24_8_32_FLOAT;
	default:
		return ~0U;
	}
}

static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL) != ~0U;
}

static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format) != ~0U &&
	       r600_translate_colorswap(format) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}

boolean r600_is_format_supported(struct pipe_screen *screen,
				 enum pipe_format format,
				 enum pipe_texture_target target,
				 unsigned sample_count,
				 unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return FALSE;
	}

	if (!util_format_is_supported(format, usage))
		return FALSE;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return FALSE;

		/* R11G11B10 is broken on R6xx. */
		if (rscreen->b.chip_class == R600 &&
		    format == PIPE_FORMAT_R11G11B10_FLOAT)
			return FALSE;

		/* MSAA integer colorbuffers hang. */
		if (util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			return FALSE;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return FALSE;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if (usage & PIPE_BIND_TRANSFER_READ)
		retval |= PIPE_BIND_TRANSFER_READ;
	if (usage & PIPE_BIND_TRANSFER_WRITE)
		retval |= PIPE_BIND_TRANSFER_WRITE;

	/* The format is only supported if every requested usage bit was granted. */
	return retval == usage;
}

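/* Descriptive note (not in the original file): the emit function below writes
 * the four PA_SU_POLY_OFFSET front/back scale and offset registers; the units
 * value is multiplied by 2 for the 24-bit Z formats and by 4 for Z16 before
 * being emitted. */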
static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;

	switch (state->zs_format) {
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		offset_units *= 2.0f;
		break;
	case PIPE_FORMAT_Z16_UNORM:
		offset_units *= 4.0f;
		break;
	default:;
	}

	radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
}

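/* Descriptive note (not in the original file): r600_get_blend_control() packs
 * one CB_BLENDn_CONTROL word from the gallium blend state for MRT i (or MRT 0
 * when independent blending is disabled); the separate alpha path is only
 * enabled when the alpha function/factors differ from the RGB ones. */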
static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
{
	int j = state->independent_blend_enable ? i : 0;

	unsigned eqRGB = state->rt[j].rgb_func;
	unsigned srcRGB = state->rt[j].rgb_src_factor;
	unsigned dstRGB = state->rt[j].rgb_dst_factor;

	unsigned eqA = state->rt[j].alpha_func;
	unsigned srcA = state->rt[j].alpha_src_factor;
	unsigned dstA = state->rt[j].alpha_dst_factor;
	uint32_t bc = 0;

	if (!state->rt[j].blend_enable)
		return 0;

	bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
	bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
	bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

	if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
		bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
		bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
		bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
		bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
	}
	return bc;
}

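/* Descriptive note (not in the original file): r600_create_blend_state_mode()
 * below builds two command buffers for the blend state object: "buffer" with
 * the requested blend state and "buffer_no_blend" with per-target blending
 * forced off, along with the matching cb_color_control words. */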
299 static void *r600_create_blend_state_mode(struct pipe_context
*ctx
,
300 const struct pipe_blend_state
*state
,
303 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
304 uint32_t color_control
= 0, target_mask
= 0;
305 struct r600_blend_state
*blend
= CALLOC_STRUCT(r600_blend_state
);
311 r600_init_command_buffer(&blend
->buffer
, 20);
312 r600_init_command_buffer(&blend
->buffer_no_blend
, 20);
314 /* R600 does not support per-MRT blends */
315 if (rctx
->b
.family
> CHIP_R600
)
316 color_control
|= S_028808_PER_MRT_BLEND(1);
318 if (state
->logicop_enable
) {
319 color_control
|= (state
->logicop_func
<< 16) | (state
->logicop_func
<< 20);
321 color_control
|= (0xcc << 16);
	/* We pretend all 8 colorbuffers are used; CB_SHADER_MASK will disable the unused ones. */
324 if (state
->independent_blend_enable
) {
325 for (int i
= 0; i
< 8; i
++) {
326 if (state
->rt
[i
].blend_enable
) {
327 color_control
|= S_028808_TARGET_BLEND_ENABLE(1 << i
);
329 target_mask
|= (state
->rt
[i
].colormask
<< (4 * i
));
332 for (int i
= 0; i
< 8; i
++) {
333 if (state
->rt
[0].blend_enable
) {
334 color_control
|= S_028808_TARGET_BLEND_ENABLE(1 << i
);
336 target_mask
|= (state
->rt
[0].colormask
<< (4 * i
));
341 color_control
|= S_028808_SPECIAL_OP(mode
);
343 color_control
|= S_028808_SPECIAL_OP(V_028808_DISABLE
);
345 /* only MRT0 has dual src blend */
346 blend
->dual_src_blend
= util_blend_state_is_dual(state
, 0);
347 blend
->cb_target_mask
= target_mask
;
348 blend
->cb_color_control
= color_control
;
349 blend
->cb_color_control_no_blend
= color_control
& C_028808_TARGET_BLEND_ENABLE
;
350 blend
->alpha_to_one
= state
->alpha_to_one
;
352 r600_store_context_reg(&blend
->buffer
, R_028D44_DB_ALPHA_TO_MASK
,
353 S_028D44_ALPHA_TO_MASK_ENABLE(state
->alpha_to_coverage
) |
354 S_028D44_ALPHA_TO_MASK_OFFSET0(2) |
355 S_028D44_ALPHA_TO_MASK_OFFSET1(2) |
356 S_028D44_ALPHA_TO_MASK_OFFSET2(2) |
357 S_028D44_ALPHA_TO_MASK_OFFSET3(2));
359 /* Copy over the registers set so far into buffer_no_blend. */
360 memcpy(blend
->buffer_no_blend
.buf
, blend
->buffer
.buf
, blend
->buffer
.num_dw
* 4);
361 blend
->buffer_no_blend
.num_dw
= blend
->buffer
.num_dw
;
363 /* Only add blend registers if blending is enabled. */
364 if (!G_028808_TARGET_BLEND_ENABLE(color_control
)) {
368 /* The first R600 does not support per-MRT blends */
369 r600_store_context_reg(&blend
->buffer
, R_028804_CB_BLEND_CONTROL
,
370 r600_get_blend_control(state
, 0));
372 if (rctx
->b
.family
> CHIP_R600
) {
373 r600_store_context_reg_seq(&blend
->buffer
, R_028780_CB_BLEND0_CONTROL
, 8);
374 for (int i
= 0; i
< 8; i
++) {
375 r600_store_value(&blend
->buffer
, r600_get_blend_control(state
, i
));
static void *r600_create_blend_state(struct pipe_context *ctx,
				     const struct pipe_blend_state *state)
{
	return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL);
}

387 static void *r600_create_dsa_state(struct pipe_context
*ctx
,
388 const struct pipe_depth_stencil_alpha_state
*state
)
390 unsigned db_depth_control
, alpha_test_control
, alpha_ref
;
391 struct r600_dsa_state
*dsa
= CALLOC_STRUCT(r600_dsa_state
);
397 r600_init_command_buffer(&dsa
->buffer
, 3);
399 dsa
->valuemask
[0] = state
->stencil
[0].valuemask
;
400 dsa
->valuemask
[1] = state
->stencil
[1].valuemask
;
401 dsa
->writemask
[0] = state
->stencil
[0].writemask
;
402 dsa
->writemask
[1] = state
->stencil
[1].writemask
;
403 dsa
->zwritemask
= state
->depth
.writemask
;
405 db_depth_control
= S_028800_Z_ENABLE(state
->depth
.enabled
) |
406 S_028800_Z_WRITE_ENABLE(state
->depth
.writemask
) |
407 S_028800_ZFUNC(state
->depth
.func
);
410 if (state
->stencil
[0].enabled
) {
411 db_depth_control
|= S_028800_STENCIL_ENABLE(1);
412 db_depth_control
|= S_028800_STENCILFUNC(state
->stencil
[0].func
); /* translates straight */
413 db_depth_control
|= S_028800_STENCILFAIL(r600_translate_stencil_op(state
->stencil
[0].fail_op
));
414 db_depth_control
|= S_028800_STENCILZPASS(r600_translate_stencil_op(state
->stencil
[0].zpass_op
));
415 db_depth_control
|= S_028800_STENCILZFAIL(r600_translate_stencil_op(state
->stencil
[0].zfail_op
));
417 if (state
->stencil
[1].enabled
) {
418 db_depth_control
|= S_028800_BACKFACE_ENABLE(1);
419 db_depth_control
|= S_028800_STENCILFUNC_BF(state
->stencil
[1].func
); /* translates straight */
420 db_depth_control
|= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state
->stencil
[1].fail_op
));
421 db_depth_control
|= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state
->stencil
[1].zpass_op
));
422 db_depth_control
|= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state
->stencil
[1].zfail_op
));
427 alpha_test_control
= 0;
429 if (state
->alpha
.enabled
) {
430 alpha_test_control
= S_028410_ALPHA_FUNC(state
->alpha
.func
);
431 alpha_test_control
|= S_028410_ALPHA_TEST_ENABLE(1);
432 alpha_ref
= fui(state
->alpha
.ref_value
);
434 dsa
->sx_alpha_test_control
= alpha_test_control
& 0xff;
435 dsa
->alpha_ref
= alpha_ref
;
437 r600_store_context_reg(&dsa
->buffer
, R_028800_DB_DEPTH_CONTROL
, db_depth_control
);
441 static void *r600_create_rs_state(struct pipe_context
*ctx
,
442 const struct pipe_rasterizer_state
*state
)
444 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
445 unsigned tmp
, sc_mode_cntl
, spi_interp
;
446 float psize_min
, psize_max
;
447 struct r600_rasterizer_state
*rs
= CALLOC_STRUCT(r600_rasterizer_state
);
453 r600_init_command_buffer(&rs
->buffer
, 30);
455 rs
->flatshade
= state
->flatshade
;
456 rs
->sprite_coord_enable
= state
->sprite_coord_enable
;
457 rs
->two_side
= state
->light_twoside
;
458 rs
->clip_plane_enable
= state
->clip_plane_enable
;
459 rs
->pa_sc_line_stipple
= state
->line_stipple_enable
?
460 S_028A0C_LINE_PATTERN(state
->line_stipple_pattern
) |
461 S_028A0C_REPEAT_COUNT(state
->line_stipple_factor
) : 0;
462 rs
->pa_cl_clip_cntl
=
463 S_028810_PS_UCP_MODE(3) |
464 S_028810_DX_CLIP_SPACE_DEF(state
->clip_halfz
) |
465 S_028810_ZCLIP_NEAR_DISABLE(!state
->depth_clip
) |
466 S_028810_ZCLIP_FAR_DISABLE(!state
->depth_clip
) |
467 S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
468 if (rctx
->b
.chip_class
== R700
) {
469 rs
->pa_cl_clip_cntl
|=
470 S_028810_DX_RASTERIZATION_KILL(state
->rasterizer_discard
);
472 rs
->multisample_enable
= state
->multisample
;
475 rs
->offset_units
= state
->offset_units
;
476 rs
->offset_scale
= state
->offset_scale
* 16.0f
;
477 rs
->offset_enable
= state
->offset_point
|| state
->offset_line
|| state
->offset_tri
;
479 if (state
->point_size_per_vertex
) {
480 psize_min
= util_get_min_point_size(state
);
483 /* Force the point size to be as if the vertex output was disabled. */
484 psize_min
= state
->point_size
;
485 psize_max
= state
->point_size
;
488 sc_mode_cntl
= S_028A4C_MSAA_ENABLE(state
->multisample
) |
489 S_028A4C_LINE_STIPPLE_ENABLE(state
->line_stipple_enable
) |
490 S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
491 S_028A4C_PS_ITER_SAMPLE(state
->multisample
&& rctx
->ps_iter_samples
> 1);
492 if (rctx
->b
.family
== CHIP_RV770
) {
493 /* workaround possible rendering corruption on RV770 with hyperz together with sample shading */
494 sc_mode_cntl
|= S_028A4C_TILE_COVER_DISABLE(state
->multisample
&& rctx
->ps_iter_samples
> 1);
496 if (rctx
->b
.chip_class
>= R700
) {
497 sc_mode_cntl
|= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
498 S_028A4C_R700_ZMM_LINE_OFFSET(1) |
499 S_028A4C_R700_VPORT_SCISSOR_ENABLE(state
->scissor
);
501 sc_mode_cntl
|= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
502 rs
->scissor_enable
= state
->scissor
;
505 spi_interp
= S_0286D4_FLAT_SHADE_ENA(1);
506 if (state
->sprite_coord_enable
) {
507 spi_interp
|= S_0286D4_PNT_SPRITE_ENA(1) |
508 S_0286D4_PNT_SPRITE_OVRD_X(2) |
509 S_0286D4_PNT_SPRITE_OVRD_Y(3) |
510 S_0286D4_PNT_SPRITE_OVRD_Z(0) |
511 S_0286D4_PNT_SPRITE_OVRD_W(1);
512 if (state
->sprite_coord_mode
!= PIPE_SPRITE_COORD_UPPER_LEFT
) {
513 spi_interp
|= S_0286D4_PNT_SPRITE_TOP_1(1);
517 r600_store_context_reg_seq(&rs
->buffer
, R_028A00_PA_SU_POINT_SIZE
, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
519 tmp
= r600_pack_float_12p4(state
->point_size
/2);
520 r600_store_value(&rs
->buffer
, /* R_028A00_PA_SU_POINT_SIZE */
521 S_028A00_HEIGHT(tmp
) | S_028A00_WIDTH(tmp
));
522 r600_store_value(&rs
->buffer
, /* R_028A04_PA_SU_POINT_MINMAX */
523 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min
/2)) |
524 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max
/2)));
525 r600_store_value(&rs
->buffer
, /* R_028A08_PA_SU_LINE_CNTL */
526 S_028A08_WIDTH(r600_pack_float_12p4(state
->line_width
/2)));
528 r600_store_context_reg(&rs
->buffer
, R_0286D4_SPI_INTERP_CONTROL_0
, spi_interp
);
529 r600_store_context_reg(&rs
->buffer
, R_028A4C_PA_SC_MODE_CNTL
, sc_mode_cntl
);
530 r600_store_context_reg(&rs
->buffer
, R_028C08_PA_SU_VTX_CNTL
,
531 S_028C08_PIX_CENTER_HALF(state
->half_pixel_center
) |
532 S_028C08_QUANT_MODE(V_028C08_X_1_256TH
));
533 r600_store_context_reg(&rs
->buffer
, R_028DFC_PA_SU_POLY_OFFSET_CLAMP
, fui(state
->offset_clamp
));
535 rs
->pa_su_sc_mode_cntl
= S_028814_PROVOKING_VTX_LAST(!state
->flatshade_first
) |
536 S_028814_CULL_FRONT(state
->cull_face
& PIPE_FACE_FRONT
? 1 : 0) |
537 S_028814_CULL_BACK(state
->cull_face
& PIPE_FACE_BACK
? 1 : 0) |
538 S_028814_FACE(!state
->front_ccw
) |
539 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state
, state
->fill_front
)) |
540 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state
, state
->fill_back
)) |
541 S_028814_POLY_OFFSET_PARA_ENABLE(state
->offset_point
|| state
->offset_line
) |
542 S_028814_POLY_MODE(state
->fill_front
!= PIPE_POLYGON_MODE_FILL
||
543 state
->fill_back
!= PIPE_POLYGON_MODE_FILL
) |
544 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state
->fill_front
)) |
545 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state
->fill_back
));
546 if (rctx
->b
.chip_class
== R700
) {
547 r600_store_context_reg(&rs
->buffer
, R_028814_PA_SU_SC_MODE_CNTL
, rs
->pa_su_sc_mode_cntl
);
549 if (rctx
->b
.chip_class
== R600
) {
550 r600_store_context_reg(&rs
->buffer
, R_028350_SX_MISC
,
551 S_028350_MULTIPASS(state
->rasterizer_discard
));
556 static void *r600_create_sampler_state(struct pipe_context
*ctx
,
557 const struct pipe_sampler_state
*state
)
559 struct r600_pipe_sampler_state
*ss
= CALLOC_STRUCT(r600_pipe_sampler_state
);
560 unsigned aniso_flag_offset
= state
->max_anisotropy
> 1 ? 4 : 0;
566 ss
->seamless_cube_map
= state
->seamless_cube_map
;
567 ss
->border_color_use
= sampler_state_needs_border_color(state
);
569 /* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
570 ss
->tex_sampler_words
[0] =
571 S_03C000_CLAMP_X(r600_tex_wrap(state
->wrap_s
)) |
572 S_03C000_CLAMP_Y(r600_tex_wrap(state
->wrap_t
)) |
573 S_03C000_CLAMP_Z(r600_tex_wrap(state
->wrap_r
)) |
574 S_03C000_XY_MAG_FILTER(r600_tex_filter(state
->mag_img_filter
) | aniso_flag_offset
) |
575 S_03C000_XY_MIN_FILTER(r600_tex_filter(state
->min_img_filter
) | aniso_flag_offset
) |
576 S_03C000_MIP_FILTER(r600_tex_mipfilter(state
->min_mip_filter
)) |
577 S_03C000_MAX_ANISO(r600_tex_aniso_filter(state
->max_anisotropy
)) |
578 S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state
->compare_func
)) |
579 S_03C000_BORDER_COLOR_TYPE(ss
->border_color_use
? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER
: 0);
580 /* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
581 ss
->tex_sampler_words
[1] =
582 S_03C004_MIN_LOD(S_FIXED(CLAMP(state
->min_lod
, 0, 15), 6)) |
583 S_03C004_MAX_LOD(S_FIXED(CLAMP(state
->max_lod
, 0, 15), 6)) |
584 S_03C004_LOD_BIAS(S_FIXED(CLAMP(state
->lod_bias
, -16, 16), 6));
585 /* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
586 ss
->tex_sampler_words
[2] = S_03C008_TYPE(1);
588 if (ss
->border_color_use
) {
589 memcpy(&ss
->border_color
, &state
->border_color
, sizeof(state
->border_color
));
594 static struct pipe_sampler_view
*
595 texture_buffer_sampler_view(struct r600_pipe_sampler_view
*view
,
596 unsigned width0
, unsigned height0
)
599 struct r600_texture
*tmp
= (struct r600_texture
*)view
->base
.texture
;
600 int stride
= util_format_get_blocksize(view
->base
.format
);
601 unsigned format
, num_format
, format_comp
, endian
;
602 uint64_t offset
= view
->base
.u
.buf
.first_element
* stride
;
603 unsigned size
= (view
->base
.u
.buf
.last_element
- view
->base
.u
.buf
.first_element
+ 1) * stride
;
605 r600_vertex_data_type(view
->base
.format
,
606 &format
, &num_format
, &format_comp
,
609 view
->tex_resource
= &tmp
->resource
;
610 view
->skip_mip_address_reloc
= true;
612 view
->tex_resource_words
[0] = offset
;
613 view
->tex_resource_words
[1] = size
- 1;
614 view
->tex_resource_words
[2] = S_038008_BASE_ADDRESS_HI(offset
>> 32UL) |
615 S_038008_STRIDE(stride
) |
616 S_038008_DATA_FORMAT(format
) |
617 S_038008_NUM_FORMAT_ALL(num_format
) |
618 S_038008_FORMAT_COMP_ALL(format_comp
) |
619 S_038008_ENDIAN_SWAP(endian
);
620 view
->tex_resource_words
[3] = 0;
	/*
	 * In theory dword 4 is for the number of elements, for use with resinfo,
	 * but it seems to utterly fail to work; the AMD GPU shader analyser
	 * uses a const buffer to store the element sizes for buffer txq.
	 */
626 view
->tex_resource_words
[4] = 0;
627 view
->tex_resource_words
[5] = 0;
628 view
->tex_resource_words
[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER
);
632 struct pipe_sampler_view
*
633 r600_create_sampler_view_custom(struct pipe_context
*ctx
,
634 struct pipe_resource
*texture
,
635 const struct pipe_sampler_view
*state
,
636 unsigned width_first_level
, unsigned height_first_level
)
638 struct r600_pipe_sampler_view
*view
= CALLOC_STRUCT(r600_pipe_sampler_view
);
639 struct r600_texture
*tmp
= (struct r600_texture
*)texture
;
640 unsigned format
, endian
;
641 uint32_t word4
= 0, yuv_format
= 0, pitch
= 0;
642 unsigned char swizzle
[4], array_mode
= 0;
643 unsigned width
, height
, depth
, offset_level
, last_level
;
648 /* initialize base object */
650 view
->base
.texture
= NULL
;
651 pipe_reference(NULL
, &texture
->reference
);
652 view
->base
.texture
= texture
;
653 view
->base
.reference
.count
= 1;
654 view
->base
.context
= ctx
;
656 if (texture
->target
== PIPE_BUFFER
)
657 return texture_buffer_sampler_view(view
, texture
->width0
, 1);
659 swizzle
[0] = state
->swizzle_r
;
660 swizzle
[1] = state
->swizzle_g
;
661 swizzle
[2] = state
->swizzle_b
;
662 swizzle
[3] = state
->swizzle_a
;
664 format
= r600_translate_texformat(ctx
->screen
, state
->format
,
666 &word4
, &yuv_format
);
667 assert(format
!= ~0);
673 if (tmp
->is_depth
&& !tmp
->is_flushing_texture
&& !r600_can_read_depth(tmp
)) {
674 if (!r600_init_flushed_depth_texture(ctx
, texture
, NULL
)) {
678 tmp
= tmp
->flushed_depth_texture
;
681 endian
= r600_colorformat_endian_swap(format
);
683 offset_level
= state
->u
.tex
.first_level
;
684 last_level
= state
->u
.tex
.last_level
- offset_level
;
685 width
= width_first_level
;
686 height
= height_first_level
;
687 depth
= u_minify(texture
->depth0
, offset_level
);
688 pitch
= tmp
->surface
.level
[offset_level
].nblk_x
* util_format_get_blockwidth(state
->format
);
690 if (texture
->target
== PIPE_TEXTURE_1D_ARRAY
) {
692 depth
= texture
->array_size
;
693 } else if (texture
->target
== PIPE_TEXTURE_2D_ARRAY
) {
694 depth
= texture
->array_size
;
695 } else if (texture
->target
== PIPE_TEXTURE_CUBE_ARRAY
)
696 depth
= texture
->array_size
/ 6;
697 switch (tmp
->surface
.level
[offset_level
].mode
) {
698 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
699 array_mode
= V_038000_ARRAY_LINEAR_ALIGNED
;
701 case RADEON_SURF_MODE_1D
:
702 array_mode
= V_038000_ARRAY_1D_TILED_THIN1
;
704 case RADEON_SURF_MODE_2D
:
705 array_mode
= V_038000_ARRAY_2D_TILED_THIN1
;
707 case RADEON_SURF_MODE_LINEAR
:
709 array_mode
= V_038000_ARRAY_LINEAR_GENERAL
;
713 if (state
->format
== PIPE_FORMAT_X24S8_UINT
||
714 state
->format
== PIPE_FORMAT_S8X24_UINT
||
715 state
->format
== PIPE_FORMAT_X32_S8X24_UINT
||
716 state
->format
== PIPE_FORMAT_S8_UINT
)
717 view
->is_stencil_sampler
= true;
719 view
->tex_resource
= &tmp
->resource
;
720 view
->tex_resource_words
[0] = (S_038000_DIM(r600_tex_dim(texture
->target
, texture
->nr_samples
)) |
721 S_038000_TILE_MODE(array_mode
) |
722 S_038000_TILE_TYPE(tmp
->non_disp_tiling
) |
723 S_038000_PITCH((pitch
/ 8) - 1) |
724 S_038000_TEX_WIDTH(width
- 1));
725 view
->tex_resource_words
[1] = (S_038004_TEX_HEIGHT(height
- 1) |
726 S_038004_TEX_DEPTH(depth
- 1) |
727 S_038004_DATA_FORMAT(format
));
728 view
->tex_resource_words
[2] = tmp
->surface
.level
[offset_level
].offset
>> 8;
729 if (offset_level
>= tmp
->surface
.last_level
) {
730 view
->tex_resource_words
[3] = tmp
->surface
.level
[offset_level
].offset
>> 8;
732 view
->tex_resource_words
[3] = tmp
->surface
.level
[offset_level
+ 1].offset
>> 8;
734 view
->tex_resource_words
[4] = (word4
|
735 S_038010_REQUEST_SIZE(1) |
736 S_038010_ENDIAN_SWAP(endian
) |
737 S_038010_BASE_LEVEL(0));
738 view
->tex_resource_words
[5] = (S_038014_BASE_ARRAY(state
->u
.tex
.first_layer
) |
739 S_038014_LAST_ARRAY(state
->u
.tex
.last_layer
));
740 if (texture
->nr_samples
> 1) {
741 /* LAST_LEVEL holds log2(nr_samples) for multisample textures */
742 view
->tex_resource_words
[5] |= S_038014_LAST_LEVEL(util_logbase2(texture
->nr_samples
));
744 view
->tex_resource_words
[5] |= S_038014_LAST_LEVEL(last_level
);
746 view
->tex_resource_words
[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE
) |
747 S_038018_MAX_ANISO(4 /* max 16 samples */));
static struct pipe_sampler_view *
r600_create_sampler_view(struct pipe_context *ctx,
			 struct pipe_resource *tex,
			 const struct pipe_sampler_view *state)
{
	return r600_create_sampler_view_custom(ctx, tex, state,
					       u_minify(tex->width0, state->u.tex.first_level),
					       u_minify(tex->height0, state->u.tex.first_level));
}

static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}

static void r600_set_polygon_stipple(struct pipe_context *ctx,
				     const struct pipe_poly_stipple *state)
{
}

775 static void r600_emit_scissor_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
777 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
778 struct r600_scissor_state
*rstate
= &rctx
->scissor
;
779 struct pipe_scissor_state
*state
;
780 bool do_disable_workaround
= false;
785 if (rctx
->b
.chip_class
== R600
&& !rctx
->scissor
.enable
) {
786 tl
= S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1);
787 br
= S_028244_BR_X(8192) | S_028244_BR_Y(8192);
788 do_disable_workaround
= true;
791 dirty_mask
= rstate
->dirty_mask
;
792 while (dirty_mask
!= 0)
794 i
= u_bit_scan(&dirty_mask
);
796 radeon_set_context_reg_seq(cs
, R_028250_PA_SC_VPORT_SCISSOR_0_TL
+ offset
, 2);
797 if (!do_disable_workaround
) {
798 state
= &rstate
->scissor
[i
];
799 tl
= S_028240_TL_X(state
->minx
) | S_028240_TL_Y(state
->miny
) |
800 S_028240_WINDOW_OFFSET_DISABLE(1);
801 br
= S_028244_BR_X(state
->maxx
) | S_028244_BR_Y(state
->maxy
);
806 rstate
->dirty_mask
= 0;
807 rstate
->atom
.num_dw
= 0;
static void r600_set_scissor_states(struct pipe_context *ctx,
				    unsigned start_slot,
				    unsigned num_scissors,
				    const struct pipe_scissor_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_scissor_state *rstate = &rctx->scissor;
	int i;

	for (i = start_slot; i < start_slot + num_scissors; i++)
		rstate->scissor[i] = state[i - start_slot];
	rstate->dirty_mask |= ((1 << num_scissors) - 1) << start_slot;
	rstate->atom.num_dw = util_bitcount(rstate->dirty_mask) * 4;

	if (rctx->b.chip_class == R600 && !rstate->enable)
		return;

	r600_mark_atom_dirty(rctx, &rstate->atom);
}

static struct r600_resource *r600_buffer_create_helper(struct r600_screen *rscreen,
							unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = PIPE_BIND_CUSTOM;
	buffer.usage = PIPE_USAGE_DEFAULT;
	buffer.flags = 0;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;

	return (struct r600_resource*)
		r600_buffer_create(&rscreen->b.b, &buffer, alignment);
}

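/* Descriptive note (not in the original file): r600_init_color_surface() below
 * fills the CB_COLOR* register values for one colorbuffer; when
 * force_cmask_fmask is set it also binds dummy CMASK/FMASK buffers (allocated
 * through r600_buffer_create_helper above), which R6xx requires for the
 * destination of a color resolve. */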
850 static void r600_init_color_surface(struct r600_context
*rctx
,
851 struct r600_surface
*surf
,
852 bool force_cmask_fmask
)
854 struct r600_screen
*rscreen
= rctx
->screen
;
855 struct r600_texture
*rtex
= (struct r600_texture
*)surf
->base
.texture
;
856 unsigned level
= surf
->base
.u
.tex
.level
;
857 unsigned pitch
, slice
;
860 unsigned format
, swap
, ntype
, endian
;
862 const struct util_format_description
*desc
;
864 bool blend_bypass
= 0, blend_clamp
= 1;
866 if (rtex
->is_depth
&& !rtex
->is_flushing_texture
&& !r600_can_read_depth(rtex
)) {
867 r600_init_flushed_depth_texture(&rctx
->b
.b
, surf
->base
.texture
, NULL
);
868 rtex
= rtex
->flushed_depth_texture
;
872 offset
= rtex
->surface
.level
[level
].offset
;
873 if (rtex
->surface
.level
[level
].mode
== RADEON_SURF_MODE_LINEAR
) {
874 assert(surf
->base
.u
.tex
.first_layer
== surf
->base
.u
.tex
.last_layer
);
875 offset
+= rtex
->surface
.level
[level
].slice_size
*
876 surf
->base
.u
.tex
.first_layer
;
879 color_view
= S_028080_SLICE_START(surf
->base
.u
.tex
.first_layer
) |
880 S_028080_SLICE_MAX(surf
->base
.u
.tex
.last_layer
);
882 pitch
= rtex
->surface
.level
[level
].nblk_x
/ 8 - 1;
883 slice
= (rtex
->surface
.level
[level
].nblk_x
* rtex
->surface
.level
[level
].nblk_y
) / 64;
888 switch (rtex
->surface
.level
[level
].mode
) {
889 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
890 color_info
= S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED
);
892 case RADEON_SURF_MODE_1D
:
893 color_info
= S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1
);
895 case RADEON_SURF_MODE_2D
:
896 color_info
= S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1
);
898 case RADEON_SURF_MODE_LINEAR
:
900 color_info
= S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_GENERAL
);
904 desc
= util_format_description(surf
->base
.format
);
906 for (i
= 0; i
< 4; i
++) {
907 if (desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_VOID
) {
912 ntype
= V_0280A0_NUMBER_UNORM
;
913 if (desc
->colorspace
== UTIL_FORMAT_COLORSPACE_SRGB
)
914 ntype
= V_0280A0_NUMBER_SRGB
;
915 else if (desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_SIGNED
) {
916 if (desc
->channel
[i
].normalized
)
917 ntype
= V_0280A0_NUMBER_SNORM
;
918 else if (desc
->channel
[i
].pure_integer
)
919 ntype
= V_0280A0_NUMBER_SINT
;
920 } else if (desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_UNSIGNED
) {
921 if (desc
->channel
[i
].normalized
)
922 ntype
= V_0280A0_NUMBER_UNORM
;
923 else if (desc
->channel
[i
].pure_integer
)
924 ntype
= V_0280A0_NUMBER_UINT
;
927 format
= r600_translate_colorformat(rctx
->b
.chip_class
, surf
->base
.format
);
928 assert(format
!= ~0);
930 swap
= r600_translate_colorswap(surf
->base
.format
);
933 if (rtex
->resource
.b
.b
.usage
== PIPE_USAGE_STAGING
) {
934 endian
= ENDIAN_NONE
;
936 endian
= r600_colorformat_endian_swap(format
);
	/* Set blend bypass according to the docs if SINT/UINT or
	 * 8/24 COLOR variants */
941 if (ntype
== V_0280A0_NUMBER_UINT
|| ntype
== V_0280A0_NUMBER_SINT
||
942 format
== V_0280A0_COLOR_8_24
|| format
== V_0280A0_COLOR_24_8
||
943 format
== V_0280A0_COLOR_X24_8_32_FLOAT
) {
948 surf
->alphatest_bypass
= ntype
== V_0280A0_NUMBER_UINT
|| ntype
== V_0280A0_NUMBER_SINT
;
950 color_info
|= S_0280A0_FORMAT(format
) |
951 S_0280A0_COMP_SWAP(swap
) |
952 S_0280A0_BLEND_BYPASS(blend_bypass
) |
953 S_0280A0_BLEND_CLAMP(blend_clamp
) |
954 S_0280A0_NUMBER_TYPE(ntype
) |
955 S_0280A0_ENDIAN(endian
);
	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases.
	 */
960 if (rctx
->b
.chip_class
== R600
) {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - BLEND_CLAMP is enabled
		 * - BLEND_FLOAT32 is disabled
		 */
966 if (desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_ZS
&&
967 (desc
->channel
[i
].size
< 12 &&
968 desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_FLOAT
&&
969 ntype
!= V_0280A0_NUMBER_UINT
&&
970 ntype
!= V_0280A0_NUMBER_SINT
) &&
971 G_0280A0_BLEND_CLAMP(color_info
) &&
972 !G_0280A0_BLEND_FLOAT32(color_info
)) {
973 color_info
|= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM
);
974 surf
->export_16bpc
= true;
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - 16-bit or smaller FLOAT
		 */
981 if (desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_ZS
&&
982 ((desc
->channel
[i
].size
< 12 &&
983 desc
->channel
[i
].type
!= UTIL_FORMAT_TYPE_FLOAT
&&
984 ntype
!= V_0280A0_NUMBER_UINT
&& ntype
!= V_0280A0_NUMBER_SINT
) ||
985 (desc
->channel
[i
].size
< 17 &&
986 desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_FLOAT
))) {
987 color_info
|= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM
);
988 surf
->export_16bpc
= true;
992 /* These might not always be initialized to zero. */
993 surf
->cb_color_base
= offset
>> 8;
994 surf
->cb_color_size
= S_028060_PITCH_TILE_MAX(pitch
) |
995 S_028060_SLICE_TILE_MAX(slice
);
996 surf
->cb_color_fmask
= surf
->cb_color_base
;
997 surf
->cb_color_cmask
= surf
->cb_color_base
;
998 surf
->cb_color_mask
= 0;
1000 pipe_resource_reference((struct pipe_resource
**)&surf
->cb_buffer_cmask
,
1001 &rtex
->resource
.b
.b
);
1002 pipe_resource_reference((struct pipe_resource
**)&surf
->cb_buffer_fmask
,
1003 &rtex
->resource
.b
.b
);
1005 if (rtex
->cmask
.size
) {
1006 surf
->cb_color_cmask
= rtex
->cmask
.offset
>> 8;
1007 surf
->cb_color_mask
|= S_028100_CMASK_BLOCK_MAX(rtex
->cmask
.slice_tile_max
);
1009 if (rtex
->fmask
.size
) {
1010 color_info
|= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE
);
1011 surf
->cb_color_fmask
= rtex
->fmask
.offset
>> 8;
1012 surf
->cb_color_mask
|= S_028100_FMASK_TILE_MAX(rtex
->fmask
.slice_tile_max
);
1013 } else { /* cmask only */
1014 color_info
|= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE
);
1016 } else if (force_cmask_fmask
) {
		/* Allocate dummy FMASK and CMASK if they aren't allocated already.
		 *
		 * R6xx needs FMASK and CMASK for the destination buffer of color resolve,
		 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated,
		 * because it's not an MSAA buffer.
		 */
1023 struct r600_cmask_info cmask
;
1024 struct r600_fmask_info fmask
;
1026 r600_texture_get_cmask_info(&rscreen
->b
, rtex
, &cmask
);
1027 r600_texture_get_fmask_info(&rscreen
->b
, rtex
, 8, &fmask
);
1030 if (!rctx
->dummy_cmask
||
1031 rctx
->dummy_cmask
->b
.b
.width0
< cmask
.size
||
1032 rctx
->dummy_cmask
->buf
->alignment
% cmask
.alignment
!= 0) {
1033 struct pipe_transfer
*transfer
;
1036 pipe_resource_reference((struct pipe_resource
**)&rctx
->dummy_cmask
, NULL
);
1037 rctx
->dummy_cmask
= r600_buffer_create_helper(rscreen
, cmask
.size
, cmask
.alignment
);
1039 /* Set the contents to 0xCC. */
1040 ptr
= pipe_buffer_map(&rctx
->b
.b
, &rctx
->dummy_cmask
->b
.b
, PIPE_TRANSFER_WRITE
, &transfer
);
1041 memset(ptr
, 0xCC, cmask
.size
);
1042 pipe_buffer_unmap(&rctx
->b
.b
, transfer
);
1044 pipe_resource_reference((struct pipe_resource
**)&surf
->cb_buffer_cmask
,
1045 &rctx
->dummy_cmask
->b
.b
);
1048 if (!rctx
->dummy_fmask
||
1049 rctx
->dummy_fmask
->b
.b
.width0
< fmask
.size
||
1050 rctx
->dummy_fmask
->buf
->alignment
% fmask
.alignment
!= 0) {
1051 pipe_resource_reference((struct pipe_resource
**)&rctx
->dummy_fmask
, NULL
);
1052 rctx
->dummy_fmask
= r600_buffer_create_helper(rscreen
, fmask
.size
, fmask
.alignment
);
1055 pipe_resource_reference((struct pipe_resource
**)&surf
->cb_buffer_fmask
,
1056 &rctx
->dummy_fmask
->b
.b
);
1058 /* Init the registers. */
1059 color_info
|= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE
);
1060 surf
->cb_color_cmask
= 0;
1061 surf
->cb_color_fmask
= 0;
1062 surf
->cb_color_mask
= S_028100_CMASK_BLOCK_MAX(cmask
.slice_tile_max
) |
1063 S_028100_FMASK_TILE_MAX(fmask
.slice_tile_max
);
1066 surf
->cb_color_info
= color_info
;
1067 surf
->cb_color_view
= color_view
;
1068 surf
->color_initialized
= true;
1071 static void r600_init_depth_surface(struct r600_context
*rctx
,
1072 struct r600_surface
*surf
)
1074 struct r600_texture
*rtex
= (struct r600_texture
*)surf
->base
.texture
;
1075 unsigned level
, pitch
, slice
, format
, offset
, array_mode
;
1077 level
= surf
->base
.u
.tex
.level
;
1078 offset
= rtex
->surface
.level
[level
].offset
;
1079 pitch
= rtex
->surface
.level
[level
].nblk_x
/ 8 - 1;
1080 slice
= (rtex
->surface
.level
[level
].nblk_x
* rtex
->surface
.level
[level
].nblk_y
) / 64;
1084 switch (rtex
->surface
.level
[level
].mode
) {
1085 case RADEON_SURF_MODE_2D
:
1086 array_mode
= V_0280A0_ARRAY_2D_TILED_THIN1
;
1088 case RADEON_SURF_MODE_1D
:
1089 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
1090 case RADEON_SURF_MODE_LINEAR
:
1092 array_mode
= V_0280A0_ARRAY_1D_TILED_THIN1
;
1096 format
= r600_translate_dbformat(surf
->base
.format
);
1097 assert(format
!= ~0);
1099 surf
->db_depth_info
= S_028010_ARRAY_MODE(array_mode
) | S_028010_FORMAT(format
);
1100 surf
->db_depth_base
= offset
>> 8;
1101 surf
->db_depth_view
= S_028004_SLICE_START(surf
->base
.u
.tex
.first_layer
) |
1102 S_028004_SLICE_MAX(surf
->base
.u
.tex
.last_layer
);
1103 surf
->db_depth_size
= S_028000_PITCH_TILE_MAX(pitch
) | S_028000_SLICE_TILE_MAX(slice
);
1104 surf
->db_prefetch_limit
= (rtex
->surface
.level
[level
].nblk_y
/ 8) - 1;
1106 switch (surf
->base
.format
) {
1107 case PIPE_FORMAT_Z24X8_UNORM
:
1108 case PIPE_FORMAT_Z24_UNORM_S8_UINT
:
1109 surf
->pa_su_poly_offset_db_fmt_cntl
=
1110 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
1112 case PIPE_FORMAT_Z32_FLOAT
:
1113 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
1114 surf
->pa_su_poly_offset_db_fmt_cntl
=
1115 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
1116 S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
1118 case PIPE_FORMAT_Z16_UNORM
:
1119 surf
->pa_su_poly_offset_db_fmt_cntl
=
1120 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
1125 /* use htile only for first level */
1126 if (rtex
->htile_buffer
&& !level
) {
1127 surf
->db_htile_data_base
= 0;
1128 surf
->db_htile_surface
= S_028D24_HTILE_WIDTH(1) |
1129 S_028D24_HTILE_HEIGHT(1) |
1130 S_028D24_FULL_CACHE(1);
1131 /* preload is not working properly on r6xx/r7xx */
1132 surf
->db_depth_info
|= S_028010_TILE_SURFACE_ENABLE(1);
1135 surf
->depth_initialized
= true;
1138 static void r600_set_framebuffer_state(struct pipe_context
*ctx
,
1139 const struct pipe_framebuffer_state
*state
)
1141 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
1142 struct r600_surface
*surf
;
1143 struct r600_texture
*rtex
;
1146 if (rctx
->framebuffer
.state
.nr_cbufs
) {
1147 rctx
->b
.flags
|= R600_CONTEXT_WAIT_3D_IDLE
| R600_CONTEXT_FLUSH_AND_INV
;
1148 rctx
->b
.flags
|= R600_CONTEXT_FLUSH_AND_INV_CB
|
1149 R600_CONTEXT_FLUSH_AND_INV_CB_META
;
1151 if (rctx
->framebuffer
.state
.zsbuf
) {
1152 rctx
->b
.flags
|= R600_CONTEXT_WAIT_3D_IDLE
| R600_CONTEXT_FLUSH_AND_INV
;
1153 rctx
->b
.flags
|= R600_CONTEXT_FLUSH_AND_INV_DB
;
1155 rtex
= (struct r600_texture
*)rctx
->framebuffer
.state
.zsbuf
->texture
;
1156 if (rctx
->b
.chip_class
>= R700
&& rtex
->htile_buffer
) {
1157 rctx
->b
.flags
|= R600_CONTEXT_FLUSH_AND_INV_DB_META
;
1161 /* Set the new state. */
1162 util_copy_framebuffer_state(&rctx
->framebuffer
.state
, state
);
1164 rctx
->framebuffer
.export_16bpc
= state
->nr_cbufs
!= 0;
1165 rctx
->framebuffer
.cb0_is_integer
= state
->nr_cbufs
&& state
->cbufs
[0] &&
1166 util_format_is_pure_integer(state
->cbufs
[0]->format
);
1167 rctx
->framebuffer
.compressed_cb_mask
= 0;
1168 rctx
->framebuffer
.is_msaa_resolve
= state
->nr_cbufs
== 2 &&
1169 state
->cbufs
[0] && state
->cbufs
[1] &&
1170 state
->cbufs
[0]->texture
->nr_samples
> 1 &&
1171 state
->cbufs
[1]->texture
->nr_samples
<= 1;
1172 rctx
->framebuffer
.nr_samples
= util_framebuffer_get_num_samples(state
);
1175 for (i
= 0; i
< state
->nr_cbufs
; i
++) {
1176 /* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
1177 bool force_cmask_fmask
= rctx
->b
.chip_class
== R600
&&
1178 rctx
->framebuffer
.is_msaa_resolve
&&
1181 surf
= (struct r600_surface
*)state
->cbufs
[i
];
1185 rtex
= (struct r600_texture
*)surf
->base
.texture
;
1186 r600_context_add_resource_size(ctx
, state
->cbufs
[i
]->texture
);
1188 if (!surf
->color_initialized
|| force_cmask_fmask
) {
1189 r600_init_color_surface(rctx
, surf
, force_cmask_fmask
);
1190 if (force_cmask_fmask
) {
1191 /* re-initialize later without compression */
1192 surf
->color_initialized
= false;
1196 if (!surf
->export_16bpc
) {
1197 rctx
->framebuffer
.export_16bpc
= false;
1200 if (rtex
->fmask
.size
&& rtex
->cmask
.size
) {
1201 rctx
->framebuffer
.compressed_cb_mask
|= 1 << i
;
1205 /* Update alpha-test state dependencies.
1206 * Alpha-test is done on the first colorbuffer only. */
1207 if (state
->nr_cbufs
) {
1208 bool alphatest_bypass
= false;
1210 surf
= (struct r600_surface
*)state
->cbufs
[0];
1212 alphatest_bypass
= surf
->alphatest_bypass
;
1215 if (rctx
->alphatest_state
.bypass
!= alphatest_bypass
) {
1216 rctx
->alphatest_state
.bypass
= alphatest_bypass
;
1217 r600_mark_atom_dirty(rctx
, &rctx
->alphatest_state
.atom
);
1223 surf
= (struct r600_surface
*)state
->zsbuf
;
1225 r600_context_add_resource_size(ctx
, state
->zsbuf
->texture
);
1227 if (!surf
->depth_initialized
) {
1228 r600_init_depth_surface(rctx
, surf
);
1231 if (state
->zsbuf
->format
!= rctx
->poly_offset_state
.zs_format
) {
1232 rctx
->poly_offset_state
.zs_format
= state
->zsbuf
->format
;
1233 r600_mark_atom_dirty(rctx
, &rctx
->poly_offset_state
.atom
);
1236 if (rctx
->db_state
.rsurf
!= surf
) {
1237 rctx
->db_state
.rsurf
= surf
;
1238 r600_mark_atom_dirty(rctx
, &rctx
->db_state
.atom
);
1239 r600_mark_atom_dirty(rctx
, &rctx
->db_misc_state
.atom
);
1241 } else if (rctx
->db_state
.rsurf
) {
1242 rctx
->db_state
.rsurf
= NULL
;
1243 r600_mark_atom_dirty(rctx
, &rctx
->db_state
.atom
);
1244 r600_mark_atom_dirty(rctx
, &rctx
->db_misc_state
.atom
);
1247 if (rctx
->cb_misc_state
.nr_cbufs
!= state
->nr_cbufs
) {
1248 rctx
->cb_misc_state
.nr_cbufs
= state
->nr_cbufs
;
1249 r600_mark_atom_dirty(rctx
, &rctx
->cb_misc_state
.atom
);
1252 if (state
->nr_cbufs
== 0 && rctx
->alphatest_state
.bypass
) {
1253 rctx
->alphatest_state
.bypass
= false;
1254 r600_mark_atom_dirty(rctx
, &rctx
->alphatest_state
.atom
);
1257 /* Calculate the CS size. */
1258 rctx
->framebuffer
.atom
.num_dw
=
1259 10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;
1261 if (rctx
->framebuffer
.state
.nr_cbufs
) {
1262 rctx
->framebuffer
.atom
.num_dw
+= 15 * rctx
->framebuffer
.state
.nr_cbufs
;
1263 rctx
->framebuffer
.atom
.num_dw
+= 3 * (2 + rctx
->framebuffer
.state
.nr_cbufs
);
1265 if (rctx
->framebuffer
.state
.zsbuf
) {
1266 rctx
->framebuffer
.atom
.num_dw
+= 16;
1267 } else if (rctx
->screen
->b
.info
.drm_minor
>= 18) {
1268 rctx
->framebuffer
.atom
.num_dw
+= 3;
1270 if (rctx
->b
.family
> CHIP_R600
&& rctx
->b
.family
< CHIP_RV770
) {
1271 rctx
->framebuffer
.atom
.num_dw
+= 2;
1274 r600_mark_atom_dirty(rctx
, &rctx
->framebuffer
.atom
);
1276 r600_set_sample_locations_constant_buffer(rctx
);
static uint32_t sample_locs_2x[] = {
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
};
static unsigned max_dist_2x = 4;

static uint32_t sample_locs_4x[] = {
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
};
static unsigned max_dist_4x = 6;

static uint32_t sample_locs_8x[] = {
	FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
	FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
};
static unsigned max_dist_8x = 7;

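/* Descriptive note (not in the original file): each FILL_SREG word above packs
 * eight signed 4-bit sub-pixel offsets (x/y pairs) in 1/16-pixel units;
 * r600_get_sample_position() below extracts a nibble, sign-extends it and maps
 * it to [0, 1) as (value + 8) / 16. */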
1296 static void r600_get_sample_position(struct pipe_context
*ctx
,
1297 unsigned sample_count
,
1298 unsigned sample_index
,
1305 switch (sample_count
) {
1308 out_value
[0] = out_value
[1] = 0.5;
1311 offset
= 4 * (sample_index
* 2);
1312 val
.idx
= (sample_locs_2x
[0] >> offset
) & 0xf;
1313 out_value
[0] = (float)(val
.idx
+ 8) / 16.0f
;
1314 val
.idx
= (sample_locs_2x
[0] >> (offset
+ 4)) & 0xf;
1315 out_value
[1] = (float)(val
.idx
+ 8) / 16.0f
;
1318 offset
= 4 * (sample_index
* 2);
1319 val
.idx
= (sample_locs_4x
[0] >> offset
) & 0xf;
1320 out_value
[0] = (float)(val
.idx
+ 8) / 16.0f
;
1321 val
.idx
= (sample_locs_4x
[0] >> (offset
+ 4)) & 0xf;
1322 out_value
[1] = (float)(val
.idx
+ 8) / 16.0f
;
1325 offset
= 4 * (sample_index
% 4 * 2);
1326 index
= (sample_index
/ 4);
1327 val
.idx
= (sample_locs_8x
[index
] >> offset
) & 0xf;
1328 out_value
[0] = (float)(val
.idx
+ 8) / 16.0f
;
1329 val
.idx
= (sample_locs_8x
[index
] >> (offset
+ 4)) & 0xf;
1330 out_value
[1] = (float)(val
.idx
+ 8) / 16.0f
;
1335 static void r600_emit_msaa_state(struct r600_context
*rctx
, int nr_samples
)
1337 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
1338 unsigned max_dist
= 0;
1340 if (rctx
->b
.family
== CHIP_R600
) {
1341 switch (nr_samples
) {
1346 radeon_set_config_reg(cs
, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S
, sample_locs_2x
[0]);
1347 max_dist
= max_dist_2x
;
1350 radeon_set_config_reg(cs
, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S
, sample_locs_4x
[0]);
1351 max_dist
= max_dist_4x
;
1354 radeon_set_config_reg_seq(cs
, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0
, 2);
1355 radeon_emit(cs
, sample_locs_8x
[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
1356 radeon_emit(cs
, sample_locs_8x
[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
1357 max_dist
= max_dist_8x
;
1361 switch (nr_samples
) {
1363 radeon_set_context_reg_seq(cs
, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX
, 2);
1364 radeon_emit(cs
, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1365 radeon_emit(cs
, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1369 radeon_set_context_reg_seq(cs
, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX
, 2);
1370 radeon_emit(cs
, sample_locs_2x
[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1371 radeon_emit(cs
, sample_locs_2x
[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1372 max_dist
= max_dist_2x
;
1375 radeon_set_context_reg_seq(cs
, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX
, 2);
1376 radeon_emit(cs
, sample_locs_4x
[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1377 radeon_emit(cs
, sample_locs_4x
[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1378 max_dist
= max_dist_4x
;
1381 radeon_set_context_reg_seq(cs
, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX
, 2);
1382 radeon_emit(cs
, sample_locs_8x
[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1383 radeon_emit(cs
, sample_locs_8x
[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1384 max_dist
= max_dist_8x
;
1389 if (nr_samples
> 1) {
1390 radeon_set_context_reg_seq(cs
, R_028C00_PA_SC_LINE_CNTL
, 2);
1391 radeon_emit(cs
, S_028C00_LAST_PIXEL(1) |
1392 S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
1393 radeon_emit(cs
, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples
)) |
1394 S_028C04_MAX_SAMPLE_DIST(max_dist
)); /* R_028C04_PA_SC_AA_CONFIG */
1396 radeon_set_context_reg_seq(cs
, R_028C00_PA_SC_LINE_CNTL
, 2);
1397 radeon_emit(cs
, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
1398 radeon_emit(cs
, 0); /* R_028C04_PA_SC_AA_CONFIG */
1402 static void r600_emit_framebuffer_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
1404 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
1405 struct pipe_framebuffer_state
*state
= &rctx
->framebuffer
.state
;
1406 unsigned nr_cbufs
= state
->nr_cbufs
;
1407 struct r600_surface
**cb
= (struct r600_surface
**)&state
->cbufs
[0];
1408 unsigned i
, sbu
= 0;
1411 radeon_set_context_reg_seq(cs
, R_0280A0_CB_COLOR0_INFO
, 8);
1412 for (i
= 0; i
< nr_cbufs
; i
++) {
1413 radeon_emit(cs
, cb
[i
] ? cb
[i
]->cb_color_info
: 0);
1415 /* set CB_COLOR1_INFO for possible dual-src blending */
1416 if (i
== 1 && cb
[0]) {
1417 radeon_emit(cs
, cb
[0]->cb_color_info
);
1420 for (; i
< 8; i
++) {
1425 for (i
= 0; i
< nr_cbufs
; i
++) {
1432 radeon_set_context_reg(cs
, R_028040_CB_COLOR0_BASE
+ i
*4, cb
[i
]->cb_color_base
);
1434 reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1436 (struct r600_resource
*)cb
[i
]->base
.texture
,
1437 RADEON_USAGE_READWRITE
,
1438 cb
[i
]->base
.texture
->nr_samples
> 1 ?
1439 RADEON_PRIO_COLOR_BUFFER_MSAA
:
1440 RADEON_PRIO_COLOR_BUFFER
);
1441 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
1442 radeon_emit(cs
, reloc
);
1445 radeon_set_context_reg(cs
, R_0280E0_CB_COLOR0_FRAG
+ i
*4, cb
[i
]->cb_color_fmask
);
1447 reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1449 cb
[i
]->cb_buffer_fmask
,
1450 RADEON_USAGE_READWRITE
,
1451 cb
[i
]->base
.texture
->nr_samples
> 1 ?
1452 RADEON_PRIO_COLOR_BUFFER_MSAA
:
1453 RADEON_PRIO_COLOR_BUFFER
);
1454 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
1455 radeon_emit(cs
, reloc
);
1458 radeon_set_context_reg(cs
, R_0280C0_CB_COLOR0_TILE
+ i
*4, cb
[i
]->cb_color_cmask
);
1460 reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1462 cb
[i
]->cb_buffer_cmask
,
1463 RADEON_USAGE_READWRITE
,
1464 cb
[i
]->base
.texture
->nr_samples
> 1 ?
1465 RADEON_PRIO_COLOR_BUFFER_MSAA
:
1466 RADEON_PRIO_COLOR_BUFFER
);
1467 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
1468 radeon_emit(cs
, reloc
);
1471 radeon_set_context_reg_seq(cs
, R_028060_CB_COLOR0_SIZE
, nr_cbufs
);
1472 for (i
= 0; i
< nr_cbufs
; i
++) {
1473 radeon_emit(cs
, cb
[i
] ? cb
[i
]->cb_color_size
: 0);
1476 radeon_set_context_reg_seq(cs
, R_028080_CB_COLOR0_VIEW
, nr_cbufs
);
1477 for (i
= 0; i
< nr_cbufs
; i
++) {
1478 radeon_emit(cs
, cb
[i
] ? cb
[i
]->cb_color_view
: 0);
1481 radeon_set_context_reg_seq(cs
, R_028100_CB_COLOR0_MASK
, nr_cbufs
);
1482 for (i
= 0; i
< nr_cbufs
; i
++) {
1483 radeon_emit(cs
, cb
[i
] ? cb
[i
]->cb_color_mask
: 0);
1486 sbu
|= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs
);
1489 /* SURFACE_BASE_UPDATE */
1490 if (rctx
->b
.family
> CHIP_R600
&& rctx
->b
.family
< CHIP_RV770
&& sbu
) {
1491 radeon_emit(cs
, PKT3(PKT3_SURFACE_BASE_UPDATE
, 0, 0));
1492 radeon_emit(cs
, sbu
);
1498 struct r600_surface
*surf
= (struct r600_surface
*)state
->zsbuf
;
1499 unsigned reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1501 (struct r600_resource
*)state
->zsbuf
->texture
,
1502 RADEON_USAGE_READWRITE
,
1503 surf
->base
.texture
->nr_samples
> 1 ?
1504 RADEON_PRIO_DEPTH_BUFFER_MSAA
:
1505 RADEON_PRIO_DEPTH_BUFFER
);
1507 radeon_set_context_reg(cs
, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL
,
1508 surf
->pa_su_poly_offset_db_fmt_cntl
);
1510 radeon_set_context_reg_seq(cs
, R_028000_DB_DEPTH_SIZE
, 2);
1511 radeon_emit(cs
, surf
->db_depth_size
); /* R_028000_DB_DEPTH_SIZE */
1512 radeon_emit(cs
, surf
->db_depth_view
); /* R_028004_DB_DEPTH_VIEW */
1513 radeon_set_context_reg_seq(cs
, R_02800C_DB_DEPTH_BASE
, 2);
1514 radeon_emit(cs
, surf
->db_depth_base
); /* R_02800C_DB_DEPTH_BASE */
1515 radeon_emit(cs
, surf
->db_depth_info
); /* R_028010_DB_DEPTH_INFO */
1517 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
1518 radeon_emit(cs
, reloc
);
1520 radeon_set_context_reg(cs
, R_028D34_DB_PREFETCH_LIMIT
, surf
->db_prefetch_limit
);
1522 sbu
|= SURFACE_BASE_UPDATE_DEPTH
;
1523 } else if (rctx
->screen
->b
.info
.drm_minor
>= 18) {
1524 /* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
1525 * Older kernels are out of luck. */
1526 radeon_set_context_reg(cs
, R_028010_DB_DEPTH_INFO
, S_028010_FORMAT(V_028010_DEPTH_INVALID
));
1529 /* SURFACE_BASE_UPDATE */
1530 if (rctx
->b
.family
> CHIP_R600
&& rctx
->b
.family
< CHIP_RV770
&& sbu
) {
1531 radeon_emit(cs
, PKT3(PKT3_SURFACE_BASE_UPDATE
, 0, 0));
1532 radeon_emit(cs
, sbu
);
1536 /* Framebuffer dimensions. */
1537 radeon_set_context_reg_seq(cs
, R_028204_PA_SC_WINDOW_SCISSOR_TL
, 2);
1538 radeon_emit(cs
, S_028240_TL_X(0) | S_028240_TL_Y(0) |
1539 S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
1540 radeon_emit(cs
, S_028244_BR_X(state
->width
) |
1541 S_028244_BR_Y(state
->height
)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
1543 if (rctx
->framebuffer
.is_msaa_resolve
) {
1544 radeon_set_context_reg(cs
, R_0287A0_CB_SHADER_CONTROL
, 1);
		/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
		 * ensures that the alpha test works even if there is
		 * no colorbuffer bound. */
1549 radeon_set_context_reg(cs
, R_0287A0_CB_SHADER_CONTROL
,
1550 (1ull << MAX2(nr_cbufs
, 1)) - 1);
1553 r600_emit_msaa_state(rctx
, rctx
->framebuffer
.nr_samples
);
static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (rctx->ps_iter_samples == min_samples)
		return;

	rctx->ps_iter_samples = min_samples;
	if (rctx->framebuffer.nr_samples > 1) {
		r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom);
		if (rctx->b.chip_class == R600)
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

1571 static void r600_emit_cb_misc_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
1573 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
1574 struct r600_cb_misc_state
*a
= (struct r600_cb_misc_state
*)atom
;
1576 if (G_028808_SPECIAL_OP(a
->cb_color_control
) == V_028808_SPECIAL_RESOLVE_BOX
) {
1577 radeon_set_context_reg_seq(cs
, R_028238_CB_TARGET_MASK
, 2);
1578 if (rctx
->b
.chip_class
== R600
) {
1579 radeon_emit(cs
, 0xff); /* R_028238_CB_TARGET_MASK */
1580 radeon_emit(cs
, 0xff); /* R_02823C_CB_SHADER_MASK */
1582 radeon_emit(cs
, 0xf); /* R_028238_CB_TARGET_MASK */
1583 radeon_emit(cs
, 0xf); /* R_02823C_CB_SHADER_MASK */
1585 radeon_set_context_reg(cs
, R_028808_CB_COLOR_CONTROL
, a
->cb_color_control
);
1587 unsigned fb_colormask
= (1ULL << ((unsigned)a
->nr_cbufs
* 4)) - 1;
1588 unsigned ps_colormask
= (1ULL << ((unsigned)a
->nr_ps_color_outputs
* 4)) - 1;
1589 unsigned multiwrite
= a
->multiwrite
&& a
->nr_cbufs
> 1;
1591 radeon_set_context_reg_seq(cs
, R_028238_CB_TARGET_MASK
, 2);
1592 radeon_emit(cs
, a
->blend_colormask
& fb_colormask
); /* R_028238_CB_TARGET_MASK */
1593 /* Always enable the first color output to make sure alpha-test works even without one. */
1594 radeon_emit(cs
, 0xf | (multiwrite
? fb_colormask
: ps_colormask
)); /* R_02823C_CB_SHADER_MASK */
1595 radeon_set_context_reg(cs
, R_028808_CB_COLOR_CONTROL
,
1596 a
->cb_color_control
|
1597 S_028808_MULTIWRITE_ENABLE(multiwrite
));
1601 static void r600_emit_db_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
1603 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
1604 struct r600_db_state
*a
= (struct r600_db_state
*)atom
;
1606 if (a
->rsurf
&& a
->rsurf
->db_htile_surface
) {
1607 struct r600_texture
*rtex
= (struct r600_texture
*)a
->rsurf
->base
.texture
;
1610 radeon_set_context_reg(cs
, R_02802C_DB_DEPTH_CLEAR
, fui(rtex
->depth_clear_value
));
1611 radeon_set_context_reg(cs
, R_028D24_DB_HTILE_SURFACE
, a
->rsurf
->db_htile_surface
);
1612 radeon_set_context_reg(cs
, R_028014_DB_HTILE_DATA_BASE
, a
->rsurf
->db_htile_data_base
);
1613 reloc_idx
= radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.rings
.gfx
, rtex
->htile_buffer
,
1614 RADEON_USAGE_READWRITE
, RADEON_PRIO_HTILE
);
1615 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_NOP
, 0, 0);
1616 cs
->buf
[cs
->cdw
++] = reloc_idx
;
1618 radeon_set_context_reg(cs
, R_028D24_DB_HTILE_SURFACE
, 0);
1622 static void r600_emit_db_misc_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
1624 struct radeon_winsys_cs
*cs
= rctx
->b
.rings
.gfx
.cs
;
1625 struct r600_db_misc_state
*a
= (struct r600_db_misc_state
*)atom
;
1626 unsigned db_render_control
= 0;
1627 unsigned db_render_override
=
1628 S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE
) |
1629 S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE
);
1631 if (a
->occlusion_query_enabled
) {
1632 if (rctx
->b
.chip_class
>= R700
) {
1633 db_render_control
|= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
1635 db_render_override
|= S_028D10_NOOP_CULL_DISABLE(1);
1637 if (rctx
->db_state
.rsurf
&& rctx
->db_state
.rsurf
->db_htile_surface
) {
1638 /* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
1639 db_render_override
|= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF
);
		/* This fixes a lockup when hyperz and alpha test are enabled at
		 * the same time; somehow the GPU gets confused about which order
		 * to pick for the z test.
		 */
1644 if (rctx
->alphatest_state
.sx_alpha_test_control
) {
1645 db_render_override
|= S_028D10_FORCE_SHADER_Z_ORDER(1);
1648 db_render_override
|= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE
);
1650 if (rctx
->b
.chip_class
== R600
&& rctx
->framebuffer
.nr_samples
> 1 && rctx
->ps_iter_samples
> 0) {
1651 /* sample shading and hyperz causes lockups on R6xx chips */
1652 db_render_override
|= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE
);
1654 if (a
->flush_depthstencil_through_cb
) {
1655 assert(a
->copy_depth
|| a
->copy_stencil
);
1657 db_render_control
|= S_028D0C_DEPTH_COPY_ENABLE(a
->copy_depth
) |
1658 S_028D0C_STENCIL_COPY_ENABLE(a
->copy_stencil
) |
1659 S_028D0C_COPY_CENTROID(1) |
1660 S_028D0C_COPY_SAMPLE(a
->copy_sample
);
1662 if (rctx
->b
.chip_class
== R600
)
1663 db_render_override
|= S_028D10_NOOP_CULL_DISABLE(1);
1665 if (rctx
->b
.family
== CHIP_RV610
|| rctx
->b
.family
== CHIP_RV630
||
1666 rctx
->b
.family
== CHIP_RV620
|| rctx
->b
.family
== CHIP_RV635
)
1667 db_render_override
|= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE
);
1668 } else if (a
->flush_depth_inplace
|| a
->flush_stencil_inplace
) {
1669 db_render_control
|= S_028D0C_DEPTH_COMPRESS_DISABLE(a
->flush_depth_inplace
) |
1670 S_028D0C_STENCIL_COMPRESS_DISABLE(a
->flush_stencil_inplace
);
1671 db_render_override
|= S_028D10_NOOP_CULL_DISABLE(1);
1673 if (a
->htile_clear
) {
1674 db_render_control
|= S_028D0C_DEPTH_CLEAR_ENABLE(1);
1677 /* RV770 workaround for a hang with 8x MSAA. */
1678 if (rctx
->b
.family
== CHIP_RV770
&& a
->log_samples
== 3) {
1679 db_render_override
|= S_028D10_MAX_TILES_IN_DTT(6);
1682 radeon_set_context_reg_seq(cs
, R_028D0C_DB_RENDER_CONTROL
, 2);
1683 radeon_emit(cs
, db_render_control
); /* R_028D0C_DB_RENDER_CONTROL */
1684 radeon_emit(cs
, db_render_override
); /* R_028D10_DB_RENDER_OVERRIDE */
1685 radeon_set_context_reg(cs
, R_02880C_DB_SHADER_CONTROL
, a
->db_shader_control
);
static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_config_state *a = (struct r600_config_state*)atom;

	radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
	radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
}

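/* Emit a vertex buffer fetch resource (SET_RESOURCE at the FS fetch-constant
 * offset) for every dirty vertex buffer slot, followed by a NOP relocation
 * packet for the backing buffer. */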
static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;

	while (dirty_mask) {
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;
		unsigned buffer_index = u_bit_scan(&dirty_mask);

		vb = &rctx->vertex_buffer_state.vb[buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;

		offset = vb->buffer_offset;

		/* fetch resources start at index 320 (OFFSET_FS) */
		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7);
		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
			    S_038008_STRIDE(vb->stride));
		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
	}
}

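/* Emit ALU constant buffer registers and fetch resources for every dirty
 * constant buffer of one shader stage.  The GS ring constant buffer skips the
 * ALU_CONST_* registers and uses a 4-byte stride instead of 16. */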
static void r600_emit_constant_buffers(struct r600_context *rctx,
				       struct r600_constbuf_state *state,
				       unsigned buffer_id_base,
				       unsigned reg_alu_constbuf_size,
				       unsigned reg_alu_const_cache)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct pipe_constant_buffer *cb;
		struct r600_resource *rbuffer;
		unsigned offset;
		unsigned buffer_index = ffs(dirty_mask) - 1;
		unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
		cb = &state->cb[buffer_index];
		rbuffer = (struct r600_resource*)cb->buffer;

		offset = cb->buffer_offset;

		if (!gs_ring_buffer) {
			radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
					       ALIGN_DIVUP(cb->buffer_size >> 4, 16));
			radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
		}

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (buffer_id_base + buffer_index) * 7);
		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
			    S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));

		dirty_mask &= ~(1 << buffer_index);
	}
	state->dirty_mask = 0;
}

static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
				   R600_FETCH_CONSTANTS_OFFSET_VS,
				   R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
				   R_028980_ALU_CONST_CACHE_VS_0);
}

static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
				   R600_FETCH_CONSTANTS_OFFSET_GS,
				   R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
				   R_0289C0_ALU_CONST_CACHE_GS_0);
}

static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
				   R600_FETCH_CONSTANTS_OFFSET_PS,
				   R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
				   R_028940_ALU_CONST_CACHE_PS_0);
}

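/* Emit SET_RESOURCE packets for every dirty sampler view of one shader stage,
 * plus the two NOP relocations needed for the texture resource. */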
static void r600_emit_sampler_views(struct r600_context *rctx,
				    struct r600_samplerview_state *state,
				    unsigned resource_id_base)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_view *rview;
		unsigned resource_index = u_bit_scan(&dirty_mask);
		unsigned reloc;

		rview = state->views[resource_index];

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
		radeon_emit(cs, (resource_id_base + resource_index) * 7);
		radeon_emit_array(cs, rview->tex_resource_words, 7);

		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource,
						  RADEON_USAGE_READ,
						  r600_get_sampler_view_priority(rview->tex_resource));
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);
	}
	state->dirty_mask = 0;
}

static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
				R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS);
}

static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views,
				R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS);
}

static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views,
				R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS);
}

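/* Emit SET_SAMPLER packets for every dirty sampler of one shader stage,
 * updating TEX_ARRAY_OVERRIDE for array textures and, when border colors are
 * used, the matching TD_*_SAMPLER_BORDER registers. */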
static void r600_emit_sampler_states(struct r600_context *rctx,
				     struct r600_textures_info *texinfo,
				     unsigned resource_id_base,
				     unsigned border_color_reg)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t dirty_mask = texinfo->states.dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_state *rstate;
		struct r600_pipe_sampler_view *rview;
		unsigned i = u_bit_scan(&dirty_mask);

		rstate = texinfo->states.states[i];
		rview = texinfo->views.views[i];

		/* TEX_ARRAY_OVERRIDE must be set for array textures to disable
		 * filtering between layers.
		 * Don't update TEX_ARRAY_OVERRIDE if we don't have the sampler view.
		 */
		if (rview) {
			enum pipe_texture_target target = rview->base.texture->target;
			if (target == PIPE_TEXTURE_1D_ARRAY ||
			    target == PIPE_TEXTURE_2D_ARRAY) {
				rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1);
				texinfo->is_array_sampler[i] = true;
			} else {
				rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE;
				texinfo->is_array_sampler[i] = false;
			}
		}

		radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0));
		radeon_emit(cs, (resource_id_base + i) * 3);
		radeon_emit_array(cs, rstate->tex_sampler_words, 3);

		if (rstate->border_color_use) {
			unsigned offset;

			offset = border_color_reg;
			offset += i * 16;
			radeon_set_config_reg_seq(cs, offset, 4);
			radeon_emit_array(cs, rstate->border_color.ui, 4);
		}
	}
	texinfo->states.dirty_mask = 0;
}

static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED);
}

static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED);
}

static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED);
}

static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	unsigned tmp;

	tmp = S_009508_DISABLE_CUBE_ANISO(1) |
	      S_009508_SYNC_GRADIENT(1) |
	      S_009508_SYNC_WALKER(1) |
	      S_009508_SYNC_ALIGNER(1);
	if (!rctx->seamless_cube_map.enabled) {
		tmp |= S_009508_DISABLE_CUBE_WRAP(1);
	}
	radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
}

static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
	uint8_t mask = s->sample_mask;

	radeon_set_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
			       mask | (mask << 8) | (mask << 16) | (mask << 24));
}

static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_cso_state *state = (struct r600_cso_state*)a;
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;

	radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_INTERNAL_SHADER));
}

static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;

	uint32_t v2 = 0, primid = 0;

	if (rctx->vs_shader->current->shader.vs_as_gs_a) {
		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		primid = 1;
	}

	if (state->geom_enable) {
		uint32_t cut_val;

		if (rctx->gs_shader->gs_max_out_vertices <= 128)
			cut_val = V_028A40_GS_CUT_128;
		else if (rctx->gs_shader->gs_max_out_vertices <= 256)
			cut_val = V_028A40_GS_CUT_256;
		else if (rctx->gs_shader->gs_max_out_vertices <= 512)
			cut_val = V_028A40_GS_CUT_512;
		else
			cut_val = V_028A40_GS_CUT_1024;

		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		     S_028A40_CUT_MODE(cut_val);

		if (rctx->gs_shader->current->shader.gs_prim_id_input)
			primid = 1;
	}

	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
}

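/* Program the ESGS/GSVS ring base and size registers (with relocations for the
 * ring buffers) around VGT flushes; the sizes are zeroed when geometry shading
 * is disabled. */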
static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
	struct r600_resource *rbuffer;

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));

	if (state->enable) {
		rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
		radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_RINGS_STREAMOUT));
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
				      state->esgs_ring.buffer_size >> 8);

		rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
		radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_RINGS_STREAMOUT));
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
				      state->gsvs_ring.buffer_size >> 8);
	} else {
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
	}

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}

/* Adjust GPR allocation on R6xx/R7xx */
bool r600_adjust_gprs(struct r600_context *rctx)
{
	unsigned num_ps_gprs = rctx->ps_shader->current->shader.bc.ngpr;
	unsigned num_vs_gprs, num_es_gprs, num_gs_gprs;
	unsigned new_num_ps_gprs = num_ps_gprs;
	unsigned new_num_vs_gprs, new_num_es_gprs, new_num_gs_gprs;
	unsigned cur_num_ps_gprs = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	unsigned cur_num_vs_gprs = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	unsigned cur_num_gs_gprs = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	unsigned cur_num_es_gprs = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	unsigned def_num_ps_gprs = rctx->default_ps_gprs;
	unsigned def_num_vs_gprs = rctx->default_vs_gprs;
	unsigned def_num_gs_gprs = 0;
	unsigned def_num_es_gprs = 0;
	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
	/* hardware will reserve twice num_clause_temp_gprs */
	unsigned max_gprs = def_num_gs_gprs + def_num_es_gprs + def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
	unsigned tmp, tmp2;

	if (rctx->gs_shader) {
		num_es_gprs = rctx->vs_shader->current->shader.bc.ngpr;
		num_gs_gprs = rctx->gs_shader->current->shader.bc.ngpr;
		num_vs_gprs = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
	} else {
		num_es_gprs = 0;
		num_gs_gprs = 0;
		num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
	}
	new_num_vs_gprs = num_vs_gprs;
	new_num_es_gprs = num_es_gprs;
	new_num_gs_gprs = num_gs_gprs;

	/* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must be <= max_gprs */
	if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs ||
	    new_num_es_gprs > cur_num_es_gprs || new_num_gs_gprs > cur_num_gs_gprs) {
		/* try to switch back to the default values */
		if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs ||
		    new_num_gs_gprs > def_num_gs_gprs || new_num_es_gprs > def_num_es_gprs) {
			/* always privilege the vs stage so that at worst we have the
			 * pixel stage producing wrong output (not the vertex stage) */
			new_num_ps_gprs = max_gprs - ((new_num_vs_gprs + new_num_es_gprs + new_num_gs_gprs) + def_num_clause_temp_gprs * 2);
			new_num_vs_gprs = num_vs_gprs;
			new_num_gs_gprs = num_gs_gprs;
			new_num_es_gprs = num_es_gprs;
		} else {
			new_num_ps_gprs = def_num_ps_gprs;
			new_num_vs_gprs = def_num_vs_gprs;
			new_num_es_gprs = def_num_es_gprs;
			new_num_gs_gprs = def_num_gs_gprs;
		}
	} else {
		return true;
	}

	/* SQ_PGM_RESOURCES_*.NUM_GPRS must always be programmed to a value <=
	 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS, otherwise the GPU will lock up.
	 * Also, if a shader uses more GPRs than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS,
	 * it will lock up. So in this case just discard the draw command
	 * and don't change the current GPR repartition.
	 */
	if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs ||
	    num_gs_gprs > new_num_gs_gprs || num_es_gprs > new_num_es_gprs) {
		R600_ERR("shaders require too many registers (%d + %d + %d + %d) "
			 "for a combined maximum of %d\n",
			 num_ps_gprs, num_vs_gprs, num_es_gprs, num_gs_gprs, max_gprs);
		return false;
	}

	/* in some cases we end up recomputing the current value */
	tmp = S_008C04_NUM_PS_GPRS(new_num_ps_gprs) |
	      S_008C04_NUM_VS_GPRS(new_num_vs_gprs) |
	      S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
	tmp2 = S_008C08_NUM_ES_GPRS(new_num_es_gprs) |
	       S_008C08_NUM_GS_GPRS(new_num_gs_gprs);
	if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
		rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
		rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
	}
	return true;
}

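/* Build the command buffer emitted at the start of each CS: the mandatory
 * R6xx start/context-control packets, SQ resource management setup derived
 * from the chip family, and a long list of context registers set to their
 * required default values. */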
void r600_init_atom_start_cs(struct r600_context *rctx)
{
	int ps_prio;
	int vs_prio;
	int gs_prio;
	int es_prio;
	int num_ps_gprs;
	int num_vs_gprs;
	int num_gs_gprs;
	int num_es_gprs;
	int num_temp_gprs;
	int num_ps_threads;
	int num_vs_threads;
	int num_gs_threads;
	int num_es_threads;
	int num_ps_stack_entries;
	int num_vs_stack_entries;
	int num_gs_stack_entries;
	int num_es_stack_entries;
	enum radeon_family family;
	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
	uint32_t tmp;
	unsigned i;

	r600_init_command_buffer(cb, 256);

	/* R6xx requires this packet at the start of each command buffer */
	if (rctx->b.chip_class == R600) {
		r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0));
		r600_store_value(cb, 0);
	}
	/* All asics require this one */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	family = rctx->b.family;

	switch (family) {
	case CHIP_R600:
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_threads = 180;
		num_vs_threads = 60;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 128;
		num_es_stack_entries = 128;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_threads = 180;
		num_vs_threads = 60;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	rctx->default_ps_gprs = num_ps_gprs;
	rctx->default_vs_gprs = num_vs_gprs;
	rctx->r6xx_num_clause_temp_gprs = num_temp_gprs;

	/* SQ_CONFIG */
	tmp = 0;
	tmp |= S_008C00_VC_ENABLE(1);
	tmp |= S_008C00_DX9_CONSTS(0);
	tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1);
	tmp |= S_008C00_PS_PRIO(ps_prio);
	tmp |= S_008C00_VS_PRIO(vs_prio);
	tmp |= S_008C00_GS_PRIO(gs_prio);
	tmp |= S_008C00_ES_PRIO(es_prio);
	r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp);

	/* SQ_GPR_RESOURCE_MGMT_2 */
	tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs);
	tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs);
	r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4);
	r600_store_value(cb, tmp);

	/* SQ_THREAD_RESOURCE_MGMT */
	tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads);
	tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads);
	tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads);
	tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads);
	r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */

	/* SQ_STACK_RESOURCE_MGMT_1 */
	tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
	tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
	r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */

	/* SQ_STACK_RESOURCE_MGMT_2 */
	tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
	tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
	r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */

	r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028A50_VGT_ENHANCE, 4);
		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000);
		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0);
		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204);
		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	} else {
		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000);
		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204);
		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1);
	}

	r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9);
	r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */

	/* to avoid the GPU doing any preloading of constants from a random address */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */

	r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0);
	r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0);
	r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0);

	r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2);
	r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */
	r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */

	r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0);

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3);
	r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */
	r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */
	r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */

	r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */

	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
	r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 4);
	r600_store_value(cb, fui(1.0)); /* R_028C0C_PA_CL_GB_VERT_CLIP_ADJ */
	r600_store_value(cb, fui(1.0)); /* R_028C10_PA_CL_GB_VERT_DISC_ADJ */
	r600_store_value(cb, fui(1.0)); /* R_028C14_PA_CL_GB_HORZ_CLIP_ADJ */
	r600_store_value(cb, fui(1.0)); /* R_028C18_PA_CL_GB_HORZ_DISC_ADJ */

	r600_store_context_reg_seq(cb, R_0282D0_PA_SC_VPORT_ZMIN_0, 2 * R600_MAX_VIEWPORTS);
	for (tmp = 0; tmp < R600_MAX_VIEWPORTS; tmp++) {
		r600_store_value(cb, 0); /* R_0282D0_PA_SC_VPORT_ZMIN_0 */
		r600_store_value(cb, fui(1.0)); /* R_0282D4_PA_SC_VPORT_ZMAX_0 */
	}

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
	}

	r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4);
	r600_store_value(cb, 0x1000000);  /* R_028C30_CB_CLRCMP_CONTROL */
	r600_store_value(cb, 0);          /* R_028C34_CB_CLRCMP_SRC */
	r600_store_value(cb, 0xFF);       /* R_028C38_CB_CLRCMP_DST */
	r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5);
	r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */
	r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */
	r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */
	r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */
	r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */

	r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0);

	if (rctx->b.chip_class == R700)
		r600_store_context_reg(cb, R_028350_SX_MISC, 0);
	if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout)
		r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf));

	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF);
	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF);
	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF);
}

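/* Build the per-pixel-shader command buffer: SPI_PS_INPUT_CNTL for every
 * input, SPI_PS_IN_CONTROL_0/1, SPI_INPUT_Z and SQ_PGM_RESOURCES/EXPORTS_PS,
 * and record the DB_SHADER_CONTROL bits picked up later by the db/dsa state. */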
void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control;
	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
	unsigned tmp, sid, ufi = 0;
	int need_linear = 0;
	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;

	r600_init_command_buffer(cb, 64);

	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput);
	for (i = 0; i < rshader->ninput; i++) {
		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
			pos_index = i;
		if (rshader->input[i].name == TGSI_SEMANTIC_FACE && face_index == -1)
			face_index = i;
		if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID)
			fixed_pt_position_index = i;

		sid = rshader->input[i].spi_sid;

		tmp = S_028644_SEMANTIC(sid);

		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->rasterizer && rctx->rasterizer->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
		    sprite_coord_enable & (1 << rshader->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID)
			tmp |= S_028644_SEL_CENTROID(1);

		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE)
			tmp |= S_028644_SEL_SAMPLE(1);

		if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) {
			need_linear = 1;
			tmp |= S_028644_SEL_LINEAR(1);
		}

		r600_store_value(cb, tmp);
	}

	db_shader_control = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			z_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
			stencil_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
		    rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
			mask_export = 1;
	}
	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
	db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export);
	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);
	if (rshader->uses_kill)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	exports_ps = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
			exports_ps |= 1;
		}
	}
	num_cout = rshader->nr_ps_color_exports;
	exports_ps |= S_028854_EXPORT_COLORS(num_cout);
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}

	shader->nr_ps_color_outputs = num_cout;

	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) |
			      S_0286CC_PERSP_GRADIENT_ENA(1) |
			      S_0286CC_LINEAR_GRADIENT_ENA(need_linear);
	spi_input_z = 0;
	if (pos_index != -1) {
		spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) |
					S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
					S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) |
					S_0286CC_BARYC_SAMPLE_CNTL(1)) |
					S_0286CC_POSITION_SAMPLE(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE);
		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
	}

	spi_ps_in_control_1 = 0;
	if (face_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
				       S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
	}
	if (fixed_pt_position_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
				       S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
	}

	/* HW bug in original R600 */
	if (rctx->b.family == CHIP_R600)
		ufi = 1;

	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */

	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);

	r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2);
	r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS */
			 S_028850_NUM_GPRS(rshader->bc.ngpr) |
			 S_028850_STACK_SIZE(rshader->bc.nstack) |
			 S_028850_UNCACHED_FIRST_INST(ufi));
	r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */

	r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	/* only set some bits here, the other bits are set in the dsa state */
	shader->db_shader_control = db_shader_control;
	shader->ps_depth_export = z_export | stencil_export | mask_export;

	shader->sprite_coord_enable = sprite_coord_enable;
	if (rctx->rasterizer)
		shader->flatshade = rctx->rasterizer->flatshade;
}

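/* Build the per-vertex-shader command buffer: the SPI_VS_OUT_ID mapping,
 * SPI_VS_OUT_CONFIG, SQ_PGM_RESOURCES_VS and PA_CL_VTE_CNTL, and compute the
 * PA_CL_VS_OUT_CNTL value used later by the clip state. */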
void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned spi_vs_out_id[10] = {};
	unsigned i, tmp, nparams = 0;

	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].spi_sid) {
			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
			spi_vs_out_id[nparams / 4] |= tmp;
			nparams++;
		}
	}

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10);
	for (i = 0; i < 10; i++) {
		r600_store_value(cb, spi_vs_out_id[i]);
	}

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	if (nparams < 1)
		nparams = 1;

	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
	r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS,
			       S_028868_NUM_GPRS(rshader->bc.ngpr) |
			       S_028868_STACK_SIZE(rshader->bc.nstack));
	if (rshader->vs_position_window_space) {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	} else {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_W0_FMT(1) |
				       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
				       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
				       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	}
	r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->pa_cl_vs_out_cntl =
		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) |
		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) |
		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) |
		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport);
}

void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
	unsigned gsvs_itemsize =
			(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2;

	r600_init_command_buffer(cb, 64);

	/* VGT_GS_MODE is written by r600_emit_shader_stages */
	r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1);

	if (rctx->b.chip_class >= R700) {
		r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
				       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
	}
	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));

	r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE,
			       cp_shader->ring_item_sizes[0] >> 2);

	r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
			       (rshader->ring_item_sizes[0]) >> 2);

	r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
			       gsvs_itemsize);

	/* FIXME calculate these values somehow ??? */
	r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2);
	r600_store_value(cb, 0x80); /* GS_PER_ES */
	r600_store_value(cb, 0x100); /* ES_PER_GS */
	r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1);
	r600_store_value(cb, 0x2); /* GS_PER_VS */

	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS,
			       S_02887C_NUM_GPRS(rshader->bc.ngpr) |
			       S_02887C_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
			       S_028890_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void *r600_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned i;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	for (i = 0; i < 2; i++) {
		blend.rt[i].colormask = 0xf;
		blend.rt[i].blend_enable = 1;
		blend.rt[i].rgb_func = PIPE_BLEND_ADD;
		blend.rt[i].alpha_func = PIPE_BLEND_ADD;
		blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
		blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
	}
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}

void *r700_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}

void *r600_create_decompress_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES);
}

void *r600_create_db_flush_dsa(struct r600_context *rctx)
{
	struct pipe_depth_stencil_alpha_state dsa;
	boolean quirk = false;

	if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
	    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
		quirk = true;

	memset(&dsa, 0, sizeof(dsa));

	if (quirk) {
		dsa.depth.enabled = 1;
		dsa.depth.func = PIPE_FUNC_LEQUAL;
		dsa.stencil[0].enabled = 1;
		dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
		dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP;
		dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR;
		dsa.stencil[0].writemask = 0xff;
	}

	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
}

void r600_update_db_shader_control(struct r600_context *rctx)
{
	bool dual_export;
	unsigned db_shader_control;

	if (!rctx->ps_shader) {
		return;
	}

	dual_export = rctx->framebuffer.export_16bpc &&
		      !rctx->ps_shader->current->ps_depth_export;

	db_shader_control = rctx->ps_shader->current->db_shader_control |
			    S_02880C_DUAL_EXPORT_ENABLE(dual_export);

	/* When alpha test is enabled we can't trust the hw to make the proper
	 * decision on the order in which the z test should be run relative to
	 * fragment shader execution.
	 *
	 * If alpha test is enabled, perform the z test after the fragment shader.
	 * RE_Z (early z test but no write to the zbuffer) seems to cause lockups
	 * on r6xx/r7xx.
	 */
	if (rctx->alphatest_state.sx_alpha_test_control) {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
	} else {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}
	if (db_shader_control != rctx->db_misc_state.db_shader_control) {
		rctx->db_misc_state.db_shader_control = db_shader_control;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

static inline unsigned r600_array_mode(unsigned mode)
{
	switch (mode) {
	case RADEON_SURF_MODE_LINEAR_ALIGNED:	return V_0280A0_ARRAY_LINEAR_ALIGNED;
	case RADEON_SURF_MODE_1D:		return V_0280A0_ARRAY_1D_TILED_THIN1;
	case RADEON_SURF_MODE_2D:		return V_0280A0_ARRAY_2D_TILED_THIN1;
	default:
	case RADEON_SURF_MODE_LINEAR:		return V_0280A0_ARRAY_LINEAR_GENERAL;
	}
}

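/* Tiled<->linear copy on the DMA ring.  Derives the detile/array-mode
 * parameters from the source and destination surface levels and splits the
 * copy into chunks that respect the r6xx/r7xx 8-line and packet-size limits.
 * Returns FALSE when the alignment constraints cannot be met and the caller
 * must fall back. */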
static boolean r600_dma_copy_tile(struct r600_context *rctx,
				  struct pipe_resource *dst,
				  unsigned dst_level,
				  unsigned dst_x,
				  unsigned dst_y,
				  unsigned dst_z,
				  struct pipe_resource *src,
				  unsigned src_level,
				  unsigned src_x,
				  unsigned src_y,
				  unsigned src_z,
				  unsigned copy_height,
				  unsigned pitch,
				  unsigned bpp)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode;
	uint64_t base, addr;

	dst_mode = rdst->surface.level[dst_level].mode;
	src_mode = rsrc->surface.level[src_level].mode;
	/* downcast linear aligned to linear to simplify test */
	src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
	dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;
	assert(dst_mode != src_mode);

	lbpp = util_logbase2(bpp);
	pitch_tile_max = ((pitch / bpp) / 8) - 1;

	if (dst_mode == RADEON_SURF_MODE_LINEAR) {
		array_mode = r600_array_mode(src_mode);
		slice_tile_max = (rsrc->surface.level[src_level].nblk_x * rsrc->surface.level[src_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* linear height must be the same as the slice tile max height; it's ok
		 * even if the linear destination/source have a smaller height, as the
		 * size of the dma packet will be using copy_height, which is always
		 * smaller than or equal to the linear height
		 */
		height = rsrc->surface.level[src_level].npix_y;
		detile = 1;
		x = src_x;
		y = src_y;
		z = src_z;
		base = rsrc->surface.level[src_level].offset;
		addr = rdst->surface.level[dst_level].offset;
		addr += rdst->surface.level[dst_level].slice_size * dst_z;
		addr += dst_y * pitch + dst_x * bpp;
	} else {
		array_mode = r600_array_mode(dst_mode);
		slice_tile_max = (rdst->surface.level[dst_level].nblk_x * rdst->surface.level[dst_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* linear height must be the same as the slice tile max height; it's ok
		 * even if the linear destination/source have a smaller height, as the
		 * size of the dma packet will be using copy_height, which is always
		 * smaller than or equal to the linear height
		 */
		height = rdst->surface.level[dst_level].npix_y;
		detile = 0;
		x = dst_x;
		y = dst_y;
		z = dst_z;
		base = rdst->surface.level[dst_level].offset;
		addr = rsrc->surface.level[src_level].offset;
		addr += rsrc->surface.level[src_level].slice_size * src_z;
		addr += src_y * pitch + src_x * bpp;
	}
	/* check that we are in dw/base alignment constraint */
	if (addr % 4 || base % 256) {
		return FALSE;
	}

	/* It's an r6xx/r7xx limitation: the number of lines in a blit must be a
	 * multiple of 8.  Compute the largest multiple-of-8 line count that fits
	 * within the size limit.
	 */
	cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8;
	ncopy = (copy_height / cheight) + !!(copy_height % cheight);
	r600_need_dma_space(&rctx->b, ncopy * 7);

	for (i = 0; i < ncopy; i++) {
		cheight = cheight > copy_height ? copy_height : cheight;
		size = (cheight * pitch) / 4;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ,
					  RADEON_PRIO_SDMA_TEXTURE);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE,
					  RADEON_PRIO_SDMA_TEXTURE);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size);
		cs->buf[cs->cdw++] = base >> 8;
		cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
				     (lbpp << 24) | ((height - 1) << 10) |
				     pitch_tile_max;
		cs->buf[cs->cdw++] = (slice_tile_max << 12) | (z << 0);
		cs->buf[cs->cdw++] = (x << 3) | (y << 17);
		cs->buf[cs->cdw++] = addr & 0xfffffffc;
		cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff;
		copy_height -= cheight;
		addr += cheight * pitch;
		y += cheight;
	}
	return TRUE;
}

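/* pipe_context::dma_copy entry point for the DMA ring.  Handles plain buffer
 * copies directly, same-mode texture copies as a linear buffer copy, and
 * mixed-mode copies through r600_dma_copy_tile; anything else falls back to
 * r600_resource_copy_region. */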
static void r600_dma_copy(struct pipe_context *ctx,
			  struct pipe_resource *dst,
			  unsigned dst_level,
			  unsigned dstx, unsigned dsty, unsigned dstz,
			  struct pipe_resource *src,
			  unsigned src_level,
			  const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (rctx->b.rings.dma.cs == NULL) {
		goto fallback;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		if (dst_x % 4 || src_box->x % 4 || src_box->width % 4)
			goto fallback;

		r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	if (src->format != dst->format || src_box->depth > 1) {
		goto fallback;
	}

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.level[dst_level].pitch_bytes;
	src_pitch = rsrc->surface.level[src_level].pitch_bytes;
	src_w = rsrc->surface.level[src_level].npix_x;
	dst_w = rdst->surface.level[dst_level].npix_x;
	copy_height = src_box->height / rsrc->surface.blk_h;

	dst_mode = rdst->surface.level[dst_level].mode;
	src_mode = rsrc->surface.level[src_level].mode;
	/* downcast linear aligned to linear to simplify test */
	src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
	dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
		/* strict requirement on r6xx/r7xx */
		goto fallback;
	}
	/* there are a lot of alignment constraints; this check should capture them all */
	if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset, size;

		/* a simple dma blit will do; NOTE: the code here assumes
		 *   dst_pitch == src_pitch
		 */
		src_offset = rsrc->surface.level[src_level].offset;
		src_offset += rsrc->surface.level[src_level].slice_size * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.level[dst_level].offset;
		dst_offset += rdst->surface.level[dst_level].slice_size * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		size = src_box->height * src_pitch;
		/* must be dw aligned */
		if (dst_offset % 4 || src_offset % 4 || size % 4) {
			goto fallback;
		}
		r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size);
	} else {
		if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
					src, src_level, src_x, src_y, src_box->z,
					copy_height, dst_pitch, bpp)) {
			goto fallback;
		}
	}
	return;

fallback:
	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box);
}

void r600_init_state_functions(struct r600_context *rctx)
{
	unsigned id = 1;

	/* !!!
	 *  To avoid GPU lockup, registers must be emitted in a specific order
	 * (no kidding ...). The order below is important and has been
	 * partially inferred from analyzing the fglrx command stream.
	 *
	 * Don't reorder atoms without carefully checking the effect (GPU lockup
	 * or piglit regression).
	 * !!!
	 */

	r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0);

	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0);
	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);

	/* samplers must be emitted before TA_CNTL_AUX, otherwise a DISABLE_CUBE_WRAP change
	 * does not take effect (TA_CNTL_AUX is emitted by r600_emit_seamless_cube_map)
	 */
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0);

	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0);
	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
	r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);

	r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);

	r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
	r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
	rctx->sample_mask.sample_mask = ~0;

	r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
	r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
	r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
	r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7);
	r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6);
	r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26);
	r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
	r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
	r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
	r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 6);
	r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
	r600_init_atom(rctx, &rctx->scissor.atom, id++, r600_emit_scissor_state, 0);
	r600_init_atom(rctx, &rctx->viewport.atom, id++, r600_emit_viewport_state, 0);
	r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
	r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
	r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
	r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
	r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
	r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
	r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
	r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
	r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
	r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
	r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);

	rctx->b.b.create_blend_state = r600_create_blend_state;
	rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state;
	rctx->b.b.create_rasterizer_state = r600_create_rs_state;
	rctx->b.b.create_sampler_state = r600_create_sampler_state;
	rctx->b.b.create_sampler_view = r600_create_sampler_view;
	rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state;
	rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple;
	rctx->b.b.set_min_samples = r600_set_min_samples;
	rctx->b.b.set_scissor_states = r600_set_scissor_states;
	rctx->b.b.get_sample_position = r600_get_sample_position;
	rctx->b.dma_copy = r600_dma_copy;

	/* this function must be last */