/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600_query.h"
#include "evergreend.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"
#include "evergreen_compute.h"
#include "util/u_math.h"
static inline unsigned evergreen_array_mode(unsigned mode)
{
	switch (mode) {
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		return V_028C70_ARRAY_LINEAR_ALIGNED;
	case RADEON_SURF_MODE_1D:
		return V_028C70_ARRAY_1D_TILED_THIN1;
	case RADEON_SURF_MODE_2D:
		return V_028C70_ARRAY_2D_TILED_THIN1;
	default:
		return V_028C70_ARRAY_LINEAR_GENERAL;
	}
}
static uint32_t eg_num_banks(uint32_t nbanks)
{
	switch (nbanks) {
	case 2:
		return 0;
	case 4:
		return 1;
	case 8:
	default:
		return 2;
	case 16:
		return 3;
	}
}
static unsigned eg_tile_split(unsigned tile_split)
{
	switch (tile_split) {
	case 64:	tile_split = 0;	break;
	case 128:	tile_split = 1;	break;
	case 256:	tile_split = 2;	break;
	case 512:	tile_split = 3;	break;
	case 1024:	tile_split = 4;	break;
	case 2048:	tile_split = 5;	break;
	case 4096:	tile_split = 6;	break;
	}
	return tile_split;
}
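/* For the supported power-of-two sizes, the mapping in eg_tile_split() above
 * is simply log2(tile_split) - 6, e.g. 64 -> 0, 512 -> 3, 4096 -> 6. */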
static unsigned eg_macro_tile_aspect(unsigned macro_tile_aspect)
{
	switch (macro_tile_aspect) {
	case 1:	macro_tile_aspect = 0;	break;
	case 2:	macro_tile_aspect = 1;	break;
	case 4:	macro_tile_aspect = 2;	break;
	case 8:	macro_tile_aspect = 3;	break;
	}
	return macro_tile_aspect;
}
static unsigned eg_bank_wh(unsigned bankwh)
{
	switch (bankwh) {
	case 1:	bankwh = 0;	break;
	case 2:	bankwh = 1;	break;
	case 4:	bankwh = 2;	break;
	case 8:	bankwh = 3;	break;
	}
	return bankwh;
}
static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}
static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028780_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028780_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028780_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}
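/* As an illustration of the two helpers above: classic non-premultiplied
 * alpha blending (RGB func ADD, src factor SRC_ALPHA, dst factor
 * INV_SRC_ALPHA) ends up as V_028780_COMB_DST_PLUS_SRC combined with
 * V_028780_BLEND_SRC_ALPHA / V_028780_BLEND_ONE_MINUS_SRC_ALPHA in the
 * CB_BLENDn_CONTROL packing done further below. */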
static unsigned r600_tex_dim(struct r600_texture *rtex,
			     unsigned view_target, unsigned nr_samples)
{
	unsigned res_target = rtex->resource.b.b.target;

	if (view_target == PIPE_TEXTURE_CUBE ||
	    view_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = view_target;
	/* If interpreting cubemaps as something else, set 2D_ARRAY. */
	else if (res_target == PIPE_TEXTURE_CUBE ||
		 res_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = PIPE_TEXTURE_2D_ARRAY;

	switch (res_target) {
	default:
	case PIPE_TEXTURE_1D:
		return V_030000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_030000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_MSAA :
					V_030000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_030000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_030000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_030000_SQ_TEX_DIM_CUBEMAP;
	}
}
static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028040_Z_16;
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
	case PIPE_FORMAT_X8Z24_UNORM:
	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
		return V_028040_Z_24;
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028040_Z_32_FLOAT;
	default:
		return ~0U;
	}
}
static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
					FALSE) != ~0U;
}
static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
	       r600_translate_colorswap(format, FALSE) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}
bool evergreen_is_format_supported(struct pipe_screen *screen,
				   enum pipe_format format,
				   enum pipe_texture_target target,
				   unsigned sample_count,
				   unsigned storage_sample_count,
				   unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return false;
	}

	if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
		return false;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return false;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return false;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if ((usage & PIPE_BIND_LINEAR) &&
	    !util_format_is_compressed(format) &&
	    !(usage & PIPE_BIND_DEPTH_STENCIL))
		retval |= PIPE_BIND_LINEAR;

	return retval == usage;
}
static void *evergreen_create_blend_state_mode(struct pipe_context *ctx,
					       const struct pipe_blend_state *state, int mode)
{
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* we pretend 8 buffers are used, CB_SHADER_MASK will disable unused ones */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	/* only have dual source on MRT0 */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->alpha_to_one = state->alpha_to_one;

	if (target_mask)
		color_control |= S_028808_MODE(mode);
	else
		color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	r600_store_context_reg(&blend->buffer, R_028808_CB_COLOR_CONTROL, color_control);
	r600_store_context_reg(&blend->buffer, R_028B70_DB_ALPHA_TO_MASK,
			       S_028B70_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET3(2));
	r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);

	/* Copy over the dwords set so far into buffer_no_blend.
	 * Only the CB_BLENDi_CONTROL registers must be set after this. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;

	for (int i = 0; i < 8; i++) {
		/* state->rt entries > 0 only written if independent blending */
		const int j = state->independent_blend_enable ? i : 0;

		unsigned eqRGB = state->rt[j].rgb_func;
		unsigned srcRGB = state->rt[j].rgb_src_factor;
		unsigned dstRGB = state->rt[j].rgb_dst_factor;
		unsigned eqA = state->rt[j].alpha_func;
		unsigned srcA = state->rt[j].alpha_src_factor;
		unsigned dstA = state->rt[j].alpha_dst_factor;
		uint32_t bc = 0;

		r600_store_value(&blend->buffer_no_blend, 0);

		if (!state->rt[j].blend_enable) {
			r600_store_value(&blend->buffer, 0);
			continue;
		}

		bc |= S_028780_BLEND_CONTROL_ENABLE(1);
		bc |= S_028780_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
		bc |= S_028780_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
		bc |= S_028780_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			bc |= S_028780_SEPARATE_ALPHA_BLEND(1);
			bc |= S_028780_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
			bc |= S_028780_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
			bc |= S_028780_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
		}
		r600_store_value(&blend->buffer, bc);
	}
	return blend;
}
static void *evergreen_create_blend_state(struct pipe_context *ctx,
					  const struct pipe_blend_state *state)
{
	return evergreen_create_blend_state_mode(ctx, state, V_028808_CB_NORMAL);
}
static void *evergreen_create_dsa_state(struct pipe_context *ctx,
					const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (!dsa) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth.writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
			   S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
			   S_028800_ZFUNC(state->depth.func);

	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha.enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha.ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}
static void *evergreen_create_rs_state(struct pipe_context *ctx,
				       const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (!rs) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->scissor_enable = state->scissor;
	rs->clip_halfz = state->clip_halfz;
	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->rasterizer_discard = state->rasterizer_discard;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip_near) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip_far) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1) |
		S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	rs->multisample_enable = state->multisample;

	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 16.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
	rs->offset_units_unscaled = state->offset_units_unscaled;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
		      S_0286D4_PNT_SPRITE_OVRD_X(2) |
		      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
		      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
		      S_0286D4_PNT_SPRITE_OVRD_W(1);
	if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
		spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH((unsigned)(state->line_width * 8)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A48_PA_SC_MODE_CNTL_0,
			       S_028A48_MSAA_ENABLE(state->multisample) |
			       S_028A48_VPORT_SCISSOR_ENABLE(1) |
			       S_028A48_LINE_STIPPLE_ENABLE(state->line_stipple_enable));

	if (rctx->b.chip_class == CAYMAN) {
		r600_store_context_reg(&rs->buffer, CM_R_028BE4_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	} else {
		r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	}

	r600_store_context_reg(&rs->buffer, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
	r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
			       S_028814_CULL_FRONT((state->cull_face & PIPE_FACE_FRONT) ? 1 : 0) |
			       S_028814_CULL_BACK((state->cull_face & PIPE_FACE_BACK) ? 1 : 0) |
			       S_028814_FACE(!state->front_ccw) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
			       S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						  state->fill_back != PIPE_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
			       S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back)));
	return rs;
}
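/* Worked example for the point-size packing above, assuming
 * r600_pack_float_12p4() produces an unsigned 12.4 fixed-point value
 * (i.e. value * 16): a pipe point_size of 4.0 is halved to 2.0 and packed
 * as 32 (0x020), which PA_SU_POINT_SIZE interprets as a half-size of 2,
 * i.e. a 4-pixel-wide point. */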
static void *evergreen_create_sampler_state(struct pipe_context *ctx,
					    const struct pipe_sampler_state *state)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
						       : state->max_anisotropy;
	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
	bool trunc_coord = state->min_img_filter == PIPE_TEX_FILTER_NEAREST &&
			   state->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
	float max_lod = state->max_lod;

	if (!ss) {
		return NULL;
	}

	/* If the min_mip_filter is NONE, then the texture has no mipmapping and
	 * MIP_FILTER will also be set to NONE. However, if more than one LOD is
	 * configured, then the texture lookup seems to fail for some specific texture
	 * formats. Forcing the number of LODs to one in this case fixes it. */
	if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
		max_lod = state->min_lod;

	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(eg_tex_filter(state->mag_img_filter, max_aniso)) |
		S_03C000_XY_MIN_FILTER(eg_tex_filter(state->min_img_filter, max_aniso)) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(max_lod, 0, 15), 8));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] =
		S_03C008_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 8)) |
		(state->seamless_cube_map ? 0 : S_03C008_DISABLE_CUBE_WRAP(1)) |
		S_03C008_TRUNCATE_COORD(trunc_coord) |
		S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}
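/* The LOD fields above use S_FIXED(x, 8), i.e. a fixed-point encoding with
 * 8 fractional bits over the clamped [0, 15] LOD range: for example a
 * min_lod of 2.5 is programmed as 2.5 * 256 = 640 (0x280). */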
struct eg_buf_res_params {
	enum pipe_format pipe_format;
	unsigned offset;
	unsigned size;
	unsigned char swizzle[4];
	bool uncached;
	bool force_swizzle;
	bool size_in_bytes;
};
static void evergreen_fill_buffer_resource_words(struct r600_context *rctx,
						 struct pipe_resource *buffer,
						 struct eg_buf_res_params *params,
						 bool *skip_mip_address_reloc,
						 unsigned tex_resource_words[8])
{
	struct r600_texture *tmp = (struct r600_texture *)buffer;
	uint64_t va;
	int stride = util_format_get_blocksize(params->pipe_format);
	unsigned format, num_format, format_comp, endian;
	unsigned swizzle_res;
	const struct util_format_description *desc;

	r600_vertex_data_type(params->pipe_format,
			      &format, &num_format, &format_comp,
			      &endian);

	desc = util_format_description(params->pipe_format);

	if (params->force_swizzle)
		swizzle_res = r600_get_swizzle_combined(params->swizzle, NULL, TRUE);
	else
		swizzle_res = r600_get_swizzle_combined(desc->swizzle, params->swizzle, TRUE);

	va = tmp->resource.gpu_address + params->offset;
	*skip_mip_address_reloc = true;
	tex_resource_words[0] = va;
	tex_resource_words[1] = params->size - 1;
	tex_resource_words[2] = S_030008_BASE_ADDRESS_HI(va >> 32UL) |
				S_030008_STRIDE(stride) |
				S_030008_DATA_FORMAT(format) |
				S_030008_NUM_FORMAT_ALL(num_format) |
				S_030008_FORMAT_COMP_ALL(format_comp) |
				S_030008_ENDIAN_SWAP(endian);
	tex_resource_words[3] = swizzle_res | S_03000C_UNCACHED(params->uncached);
	/*
	 * dword 4 is for number of elements, for use with resinfo,
	 * albeit the amd gpu shader analyser
	 * uses a const buffer to store the element sizes for buffer txq
	 */
	tex_resource_words[4] = params->size_in_bytes ? params->size : (params->size / stride);
	tex_resource_words[5] = tex_resource_words[6] = 0;
	tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
}
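/* Example of the buffer-resource words above (values are illustrative):
 * a 4096-byte buffer viewed as R32G32B32A32_FLOAT has a 16-byte stride, so
 * word1 = 4095 and, with size_in_bytes false, word4 = 4096 / 16 = 256
 * elements for resinfo/txq queries. */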
static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_context *rctx,
			    struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)
{
	struct r600_texture *tmp = (struct r600_texture *)view->base.texture;
	struct eg_buf_res_params params;

	memset(&params, 0, sizeof(params));

	params.pipe_format = view->base.format;
	params.offset = view->base.u.buf.offset;
	params.size = view->base.u.buf.size;
	params.swizzle[0] = view->base.swizzle_r;
	params.swizzle[1] = view->base.swizzle_g;
	params.swizzle[2] = view->base.swizzle_b;
	params.swizzle[3] = view->base.swizzle_a;

	evergreen_fill_buffer_resource_words(rctx, view->base.texture,
					     &params, &view->skip_mip_address_reloc,
					     view->tex_resource_words);
	view->tex_resource = &tmp->resource;

	if (tmp->resource.gpu_address)
		list_addtail(&view->list, &rctx->texture_buffers);
	return &view->base;
}
{
707 enum pipe_format pipe_format
;
711 unsigned first_level
;
713 unsigned first_layer
;
716 unsigned char swizzle
[4];
static int evergreen_fill_tex_resource_words(struct r600_context *rctx,
					     struct pipe_resource *texture,
					     struct eg_tex_res_params *params,
					     bool *skip_mip_address_reloc,
					     unsigned tex_resource_words[8])
{
	struct r600_screen *rscreen = (struct r600_screen*)rctx->b.b.screen;
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char array_mode = 0, non_disp_tiling = 0;
	unsigned height, depth, width;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks, fmask_bankh;
	struct legacy_surf_level *surflevel;
	unsigned base_level, first_level, last_level;
	unsigned dim, last_layer;
	uint64_t va;
	bool do_endian_swap = FALSE;

	tile_split = tmp->surface.u.legacy.tile_split;
	surflevel = tmp->surface.u.legacy.level;

	/* Texturing with separate depth and stencil. */
	if (tmp->db_compatible) {
		switch (params->pipe_format) {
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_Z32_FLOAT;
			break;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			/* Z24 is always stored like this for DB
			 * compatibility. */
			params->pipe_format = PIPE_FORMAT_Z24X8_UNORM;
			break;
		case PIPE_FORMAT_X24S8_UINT:
		case PIPE_FORMAT_S8X24_UINT:
		case PIPE_FORMAT_X32_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_S8_UINT;
			tile_split = tmp->surface.u.legacy.stencil_tile_split;
			surflevel = tmp->surface.u.legacy.stencil_level;
			break;
		default:
			break;
		}
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !tmp->db_compatible;

	format = r600_translate_texformat(rctx->b.b.screen, params->pipe_format,
					  params->swizzle,
					  &word4, &yuv_format, do_endian_swap);
	assert(format != ~0);
	if (format == ~0) {
		return -1;
	}

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	base_level = 0;
	first_level = params->first_level;
	last_level = params->last_level;
	width = params->width0;
	height = params->height0;
	depth = texture->depth0;

	if (params->force_level) {
		base_level = params->force_level;
		first_level = 0;
		last_level = 0;
		width = u_minify(width, params->force_level);
		height = u_minify(height, params->force_level);
		depth = u_minify(depth, params->force_level);
	}

	pitch = surflevel[base_level].nblk_x * util_format_get_blockwidth(params->pipe_format);
	non_disp_tiling = tmp->non_disp_tiling;

	switch (surflevel[base_level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_028C70_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}

	macro_aspect = tmp->surface.u.legacy.mtilea;
	bankw = tmp->surface.u.legacy.bankw;
	bankh = tmp->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(tmp->fmask.bank_height);

	/* 128 bit formats require tile type = 1 */
	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(params->pipe_format) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);

	va = tmp->resource.gpu_address;

	/* array type views and views into array types need to use layer offset */
	dim = r600_tex_dim(tmp, params->target, texture->nr_samples);

	if (dim == V_030000_SQ_TEX_DIM_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_2D_ARRAY ||
		   dim == V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA) {
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_CUBEMAP)
		depth = texture->array_size / 6;

	tex_resource_words[0] = (S_030000_DIM(dim) |
				 S_030000_PITCH((pitch / 8) - 1) |
				 S_030000_TEX_WIDTH(width - 1));
	if (rscreen->b.chip_class == CAYMAN)
		tex_resource_words[0] |= CM_S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	else
		tex_resource_words[0] |= S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	tex_resource_words[1] = (S_030004_TEX_HEIGHT(height - 1) |
				 S_030004_TEX_DEPTH(depth - 1) |
				 S_030004_ARRAY_MODE(array_mode));
	tex_resource_words[2] = (surflevel[base_level].offset + va) >> 8;

	*skip_mip_address_reloc = false;
	/* TEX_RESOURCE_WORD3.MIP_ADDRESS */
	if (texture->nr_samples > 1 && rscreen->has_compressed_msaa_texturing) {
		if (tmp->is_depth) {
			/* disable FMASK (0 = disabled) */
			tex_resource_words[3] = 0;
			*skip_mip_address_reloc = true;
		} else {
			/* FMASK should be in MIP_ADDRESS for multisample textures */
			tex_resource_words[3] = (tmp->fmask.offset + va) >> 8;
		}
	} else if (last_level && texture->nr_samples <= 1) {
		tex_resource_words[3] = (surflevel[1].offset + va) >> 8;
	} else {
		tex_resource_words[3] = (surflevel[base_level].offset + va) >> 8;
	}

	last_layer = params->last_layer;
	if (params->target != texture->target && depth == 1) {
		last_layer = params->first_layer;
	}
	tex_resource_words[4] = (word4 |
				 S_030010_ENDIAN_SWAP(endian));
	tex_resource_words[5] = S_030014_BASE_ARRAY(params->first_layer) |
				S_030014_LAST_ARRAY(last_layer);
	tex_resource_words[6] = S_030018_TILE_SPLIT(tile_split);

	if (texture->nr_samples > 1) {
		unsigned log_samples = util_logbase2(texture->nr_samples);
		if (rscreen->b.chip_class == CAYMAN) {
			tex_resource_words[4] |= S_030010_LOG2_NUM_FRAGMENTS(log_samples);
		}
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		tex_resource_words[5] |= S_030014_LAST_LEVEL(log_samples);
		tex_resource_words[6] |= S_030018_FMASK_BANK_HEIGHT(fmask_bankh);
	} else {
		bool no_mip = first_level == last_level;

		tex_resource_words[4] |= S_030010_BASE_LEVEL(first_level);
		tex_resource_words[5] |= S_030014_LAST_LEVEL(last_level);
		/* aniso max 16 samples */
		tex_resource_words[6] |= S_030018_MAX_ANISO_RATIO(no_mip ? 0 : 4);
	}

	tex_resource_words[7] = S_03001C_DATA_FORMAT(format) |
				S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_TEXTURE) |
				S_03001C_BANK_WIDTH(bankw) |
				S_03001C_BANK_HEIGHT(bankh) |
				S_03001C_MACRO_TILE_ASPECT(macro_aspect) |
				S_03001C_NUM_BANKS(nbanks) |
				S_03001C_DEPTH_SAMPLE_ORDER(tmp->db_compatible);
	return 0;
}
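/* Note on the address fields above: the BASE/MIP addresses are programmed in
 * units of 256 bytes, which is why the GPU virtual address plus the level
 * offset is shifted right by 8 before being written to the resource words. */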
struct pipe_sampler_view *
evergreen_create_sampler_view_custom(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     const struct pipe_sampler_view *state,
				     unsigned width0, unsigned height0,
				     unsigned force_level)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	struct eg_tex_res_params params;
	int ret;

	if (!view)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (state->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(rctx, view, width0, height0);

	memset(&params, 0, sizeof(params));
	params.pipe_format = state->format;
	params.force_level = force_level;
	params.width0 = width0;
	params.height0 = height0;
	params.first_level = state->u.tex.first_level;
	params.last_level = state->u.tex.last_level;
	params.first_layer = state->u.tex.first_layer;
	params.last_layer = state->u.tex.last_layer;
	params.target = state->target;
	params.swizzle[0] = state->swizzle_r;
	params.swizzle[1] = state->swizzle_g;
	params.swizzle[2] = state->swizzle_b;
	params.swizzle[3] = state->swizzle_a;

	ret = evergreen_fill_tex_resource_words(rctx, texture, &params,
						&view->skip_mip_address_reloc,
						view->tex_resource_words);
	if (ret != 0) {
		FREE(view);
		return NULL;
	}

	if (state->format == PIPE_FORMAT_X24S8_UINT ||
	    state->format == PIPE_FORMAT_S8X24_UINT ||
	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
	    state->format == PIPE_FORMAT_S8_UINT)
		view->is_stencil_sampler = true;

	view->tex_resource = &tmp->resource;

	return &view->base;
}
static struct pipe_sampler_view *
evergreen_create_sampler_view(struct pipe_context *ctx,
			      struct pipe_resource *tex,
			      const struct pipe_sampler_view *state)
{
	return evergreen_create_sampler_view_custom(ctx, tex, state,
						    tex->width0, tex->height0, 0);
}
static void evergreen_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_config_state *a = (struct r600_config_state *)atom;

	radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
	if (a->dyn_gpr_enabled) {
		radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		radeon_emit(cs, a->sq_gpr_resource_mgmt_1);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_2);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_3);
	}
	radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (a->dyn_gpr_enabled << 8));
	if (a->dyn_gpr_enabled) {
		radeon_set_context_reg(cs, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				       S_028838_PS_GPRS(0x1e) |
				       S_028838_VS_GPRS(0x1e) |
				       S_028838_GS_GPRS(0x1e) |
				       S_028838_ES_GPRS(0x1e) |
				       S_028838_HS_GPRS(0x1e) |
				       S_028838_LS_GPRS(0x1e)); /* workaround for hw issues with dyn gpr - must set all limits to 240 instead of 0, 0x1e == 240 / 8 */
	}
}
static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}
static void evergreen_set_polygon_stipple(struct pipe_context *ctx,
					  const struct pipe_poly_stipple *state)
{
}
static void evergreen_get_scissor_rect(struct r600_context *rctx,
				       unsigned tl_x, unsigned tl_y, unsigned br_x, unsigned br_y,
				       uint32_t *tl, uint32_t *br)
{
	struct pipe_scissor_state scissor = {tl_x, tl_y, br_x, br_y};

	evergreen_apply_scissor_bug_workaround(&rctx->b, &scissor);

	*tl = S_028240_TL_X(scissor.minx) | S_028240_TL_Y(scissor.miny);
	*br = S_028244_BR_X(scissor.maxx) | S_028244_BR_Y(scissor.maxy);
}
struct r600_tex_color_info {
	unsigned info;
	unsigned view;
	unsigned dim;
	unsigned pitch;
	unsigned slice;
	unsigned attrib;
	unsigned ntype;
	unsigned fmask;
	unsigned fmask_slice;
	uint64_t offset;
	boolean export_16bpc;
};
static void evergreen_set_color_surface_buffer(struct r600_context *rctx,
					       struct r600_resource *res,
					       enum pipe_format pformat,
					       unsigned first_element,
					       unsigned last_element,
					       struct r600_tex_color_info *color)
{
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	unsigned block_size = util_format_get_blocksize(res->b.b.format);
	unsigned pitch_alignment =
		MAX2(64, rctx->screen->b.info.pipe_interleave_bytes / block_size);
	unsigned pitch = align(res->b.b.width0, pitch_alignment);
	int i;
	unsigned width_elements;

	width_elements = last_element - first_element + 1;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, FALSE);
	swap = r600_translate_colorswap(pformat, FALSE);

	endian = r600_colorformat_endian_swap(format, FALSE);

	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	pitch = (pitch / 8) - 1;
	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);

	color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(0) |
		       S_028C70_BLEND_BYPASS(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);
	color->attrib = S_028C74_NON_DISP_TILING_ORDER(1);
	color->ntype = ntype;
	color->export_16bpc = false;
	color->dim = width_elements - 1;
	color->slice = 0; /* (width_elements / 64) - 1;*/
	color->view = 0;
	color->offset = (res->gpu_address + first_element) >> 8;

	color->fmask = color->offset;
	color->fmask_slice = 0;
}
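/* Pitch example for the buffer path above (values illustrative): with a
 * 4-byte block size and pipe_interleave_bytes of 256, pitch_alignment is
 * MAX2(64, 64) = 64, so a width0 of 1000 is aligned up to 1024 and
 * PITCH_TILE_MAX is programmed as 1024 / 8 - 1 = 127. */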
static void evergreen_set_color_surface_common(struct r600_context *rctx,
					       struct r600_texture *rtex,
					       unsigned level,
					       unsigned first_layer,
					       unsigned last_layer,
					       enum pipe_format pformat,
					       struct r600_tex_color_info *color)
{
	struct r600_screen *rscreen = rctx->screen;
	unsigned pitch, slice;
	unsigned non_disp_tiling, macro_aspect, tile_split, bankh, bankw, fmask_bankh, nbanks;
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	bool blend_clamp = 0, blend_bypass = 0, do_endian_swap = FALSE;
	int i;

	color->offset = rtex->surface.u.legacy.level[level].offset;
	color->view = S_028C6C_SLICE_START(first_layer) |
		      S_028C6C_SLICE_MAX(last_layer);

	color->offset += rtex->resource.gpu_address;
	color->offset >>= 8;

	color->dim = 0;
	pitch = (rtex->surface.u.legacy.level[level].nblk_x) / 8 - 1;
	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}

	switch (rtex->surface.u.legacy.level[level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
		non_disp_tiling = 1;
		break;
	case RADEON_SURF_MODE_1D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_1D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	case RADEON_SURF_MODE_2D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_2D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	if (rtex->fmask.size)
		fmask_bankh = rtex->fmask.bank_height;
	else
		fmask_bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(fmask_bankh);

	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(pformat) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	color->attrib = S_028C74_TILE_SPLIT(tile_split)|
			S_028C74_NUM_BANKS(nbanks) |
			S_028C74_BANK_WIDTH(bankw) |
			S_028C74_BANK_HEIGHT(bankh) |
			S_028C74_MACRO_TILE_ASPECT(macro_aspect) |
			S_028C74_NON_DISP_TILING_ORDER(non_disp_tiling) |
			S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);

	if (rctx->b.chip_class == CAYMAN) {
		color->attrib |= S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] ==
							    PIPE_SWIZZLE_1);

		if (rtex->resource.b.b.nr_samples > 1) {
			unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
			color->attrib |= S_028C74_NUM_SAMPLES(log_samples) |
					 S_028C74_NUM_FRAGMENTS(log_samples);
		}
	}

	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !rtex->db_compatible;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, do_endian_swap);
	assert(format != ~0);
	swap = r600_translate_colorswap(pformat, do_endian_swap);

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	/* blend clamp should be set for all NORM/SRGB types */
	if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM ||
	    ntype == V_028C70_NUMBER_SRGB)
		blend_clamp = 1;

	/* set blend bypass according to docs if SINT/UINT or
	   8/24 COLOR variants */
	if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
	    format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
	    format == V_028C70_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	color->ntype = ntype;
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(blend_clamp) |
		       S_028C70_BLEND_BYPASS(blend_bypass) |
		       S_028C70_SIMPLE_FLOAT(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);

	if (rtex->fmask.size) {
		color->info |= S_028C70_COMPRESSION(1);
	}

	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases.
	 * EXPORT_NORM can be enabled if:
	 * - 11-bit or smaller UNORM/SNORM/SRGB
	 * - 16-bit or smaller FLOAT
	 */
	color->export_16bpc = false;
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
	    ((desc->channel[i].size < 12 &&
	      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
	      ntype != V_028C70_NUMBER_UINT && ntype != V_028C70_NUMBER_SINT) ||
	     (desc->channel[i].size < 17 &&
	      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
		color->info |= S_028C70_SOURCE_FORMAT(V_028C70_EXPORT_4C_16BPC);
		color->export_16bpc = true;
	}

	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);
	color->slice = S_028C68_SLICE_TILE_MAX(slice);

	if (rtex->fmask.size) {
		color->fmask = (rtex->resource.gpu_address + rtex->fmask.offset) >> 8;
		color->fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
	} else {
		color->fmask = color->offset;
		color->fmask_slice = S_028C88_TILE_MAX(slice);
	}
}
/* This function initializes the CB* register values for RATs. It is meant
 * to be used for 1D aligned buffers that do not have an associated
 * radeon_surf.
 */
void evergreen_init_color_surface_rat(struct r600_context *rctx,
				      struct r600_surface *surf)
{
	struct pipe_resource *pipe_buffer = surf->base.texture;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_buffer(rctx, (struct r600_resource *)surf->base.texture,
					   surf->base.format, 0, pipe_buffer->width0,
					   &color);

	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info | S_028C70_RAT(1);
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->cb_color_view = 0;

	/* Set the buffer range the GPU will have access to: */
	util_range_add(pipe_buffer, &r600_resource(pipe_buffer)->valid_buffer_range,
		       0, pipe_buffer->width0);
}
void evergreen_init_color_surface(struct r600_context *rctx,
				  struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture *)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_common(rctx, rtex, level,
					   surf->base.u.tex.first_layer,
					   surf->base.u.tex.last_layer,
					   surf->base.format,
					   &color);

	surf->alphatest_bypass = color.ntype == V_028C70_NUMBER_UINT ||
				 color.ntype == V_028C70_NUMBER_SINT;
	surf->export_16bpc = color.export_16bpc;

	/* XXX handle enabling of CB beyond BASE8 which has different offset */
	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info;
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->color_initialized = true;
}
static void evergreen_init_depth_surface(struct r600_context *rctx,
					 struct r600_surface *surf)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture *)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
	uint64_t offset;
	unsigned format, array_mode;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks;

	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	offset = rtex->resource.gpu_address;
	offset += rtex->surface.u.legacy.level[level].offset;

	switch (rtex->surface.u.legacy.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	default:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	offset >>= 8;

	surf->db_z_info = S_028040_ARRAY_MODE(array_mode) |
			  S_028040_FORMAT(format) |
			  S_028040_TILE_SPLIT(tile_split)|
			  S_028040_NUM_BANKS(nbanks) |
			  S_028040_BANK_WIDTH(bankw) |
			  S_028040_BANK_HEIGHT(bankh) |
			  S_028040_MACRO_TILE_ASPECT(macro_aspect);
	if (rscreen->b.chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) {
		surf->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
	}

	assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);

	surf->db_depth_base = offset;
	surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028058_PITCH_TILE_MAX(levelinfo->nblk_x / 8 - 1) |
			      S_028058_HEIGHT_TILE_MAX(levelinfo->nblk_y / 8 - 1);
	surf->db_depth_slice = S_02805C_SLICE_TILE_MAX(levelinfo->nblk_x *
						       levelinfo->nblk_y / 64 - 1);

	if (rtex->surface.has_stencil) {
		uint64_t stencil_offset;
		unsigned stile_split = rtex->surface.u.legacy.stencil_tile_split;

		stile_split = eg_tile_split(stile_split);

		stencil_offset = rtex->surface.u.legacy.stencil_level[level].offset;
		stencil_offset += rtex->resource.gpu_address;

		surf->db_stencil_base = stencil_offset >> 8;
		surf->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8) |
					S_028044_TILE_SPLIT(stile_split);
	} else {
		surf->db_stencil_base = offset;
		/* DRM 2.6.18 allows the INVALID format to disable stencil.
		 * Older kernels are out of luck. */
		surf->db_stencil_info = rctx->screen->b.info.drm_minor >= 18 ?
					S_028044_FORMAT(V_028044_STENCIL_INVALID) :
					S_028044_FORMAT(V_028044_STENCIL_8);
	}

	if (r600_htile_enabled(rtex, level)) {
		uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
		surf->db_htile_data_base = va >> 8;
		surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
					 S_028ABC_HTILE_HEIGHT(1) |
					 S_028ABC_FULL_CACHE(1);
		surf->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
		surf->db_preload_control = 0;
	}

	surf->depth_initialized = true;
}
static void evergreen_set_framebuffer_state(struct pipe_context *ctx,
					    const struct pipe_framebuffer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_surface *surf;
	struct r600_texture *rtex;
	uint32_t i, log_samples;
	uint32_t target_mask = 0;

	/* Flush TC when changing the framebuffer state, because the only
	 * client not using TC that can change textures is the framebuffer.
	 * Other places don't typically have to flush TC.
	 */
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_INV_TEX_CACHE;

	util_copy_framebuffer_state(&rctx->framebuffer.state, state);

	/* Colorbuffers. */
	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
					   util_format_is_pure_integer(state->cbufs[0]->format);
	rctx->framebuffer.compressed_cb_mask = 0;
	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);

	for (i = 0; i < state->nr_cbufs; i++) {
		surf = (struct r600_surface*)state->cbufs[i];
		if (!surf)
			continue;

		target_mask |= (0xf << (i * 4));

		rtex = (struct r600_texture*)surf->base.texture;

		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);

		if (!surf->color_initialized) {
			evergreen_init_color_surface(rctx, surf);
		}

		if (!surf->export_16bpc) {
			rctx->framebuffer.export_16bpc = false;
		}

		if (rtex->fmask.size) {
			rctx->framebuffer.compressed_cb_mask |= 1 << i;
		}
	}

	/* Update alpha-test state dependencies.
	 * Alpha-test is done on the first colorbuffer only. */
	if (state->nr_cbufs) {
		bool alphatest_bypass = false;
		bool export_16bpc = true;

		surf = (struct r600_surface*)state->cbufs[0];
		if (surf) {
			alphatest_bypass = surf->alphatest_bypass;
			export_16bpc = surf->export_16bpc;
		}

		if (rctx->alphatest_state.bypass != alphatest_bypass) {
			rctx->alphatest_state.bypass = alphatest_bypass;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
		if (rctx->alphatest_state.cb0_export_16bpc != export_16bpc) {
			rctx->alphatest_state.cb0_export_16bpc = export_16bpc;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
	}

	/* ZS buffer. */
	if (state->zsbuf) {
		surf = (struct r600_surface*)state->zsbuf;

		r600_context_add_resource_size(ctx, state->zsbuf->texture);

		if (!surf->depth_initialized) {
			evergreen_init_depth_surface(rctx, surf);
		}

		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
			rctx->poly_offset_state.zs_format = state->zsbuf->format;
			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
		}

		if (rctx->db_state.rsurf != surf) {
			rctx->db_state.rsurf = surf;
			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	} else if (rctx->db_state.rsurf) {
		rctx->db_state.rsurf = NULL;
		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs ||
	    rctx->cb_misc_state.bound_cbufs_target_mask != target_mask) {
		rctx->cb_misc_state.bound_cbufs_target_mask = target_mask;
		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
		rctx->alphatest_state.bypass = false;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}

	log_samples = util_logbase2(rctx->framebuffer.nr_samples);
	/* This is for Cayman to program SAMPLE_RATE, and for RV770 to fix a hw bug. */
	if ((rctx->b.chip_class == CAYMAN ||
	     rctx->b.family == CHIP_RV770) &&
	    rctx->db_misc_state.log_samples != log_samples) {
		rctx->db_misc_state.log_samples = log_samples;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	/* Calculate the CS size. */
	rctx->framebuffer.atom.num_dw = 4; /* SCISSOR */

	/* MSAA. */
	if (rctx->b.chip_class == EVERGREEN)
		rctx->framebuffer.atom.num_dw += 17; /* Evergreen */
	else
		rctx->framebuffer.atom.num_dw += 28; /* Cayman */

	/* Colorbuffers. */
	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 23;
	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 2;
	rctx->framebuffer.atom.num_dw += (12 - state->nr_cbufs) * 3;

	/* ZS buffer. */
	if (state->zsbuf) {
		rctx->framebuffer.atom.num_dw += 24;
		rctx->framebuffer.atom.num_dw += 2;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		rctx->framebuffer.atom.num_dw += 4;
	}

	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	r600_set_sample_locations_constant_buffer(rctx);
	rctx->framebuffer.do_update_surf_dirtiness = true;
}
static void evergreen_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
{
	struct r600_context *rctx = (struct r600_context*)ctx;

	if (rctx->ps_iter_samples == min_samples)
		return;

	rctx->ps_iter_samples = min_samples;
	if (rctx->framebuffer.nr_samples > 1) {
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}
static const uint32_t sample_locs_8x[] = {
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
};
static unsigned max_dist_8x = 7;
static void evergreen_get_sample_position(struct pipe_context *ctx,
					  unsigned sample_count,
					  unsigned sample_index,
					  float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;
	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}
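/* Assuming the 4-bit signed bitfield above, each sample-location nibble is
 * sign-extended: e.g. a nibble of 0xd decodes to -3, giving
 * (-3 + 8) / 16 = 0.3125 as the normalized position within the pixel. */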
static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	unsigned max_dist = 0;

	switch (nr_samples) {
	default:
		nr_samples = 0;
		break;
	case 2:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_2x));
		radeon_emit_array(cs, eg_sample_locs_2x, ARRAY_SIZE(eg_sample_locs_2x));
		max_dist = eg_max_dist_2x;
		break;
	case 4:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_4x));
		radeon_emit_array(cs, eg_sample_locs_4x, ARRAY_SIZE(eg_sample_locs_4x));
		max_dist = eg_max_dist_4x;
		break;
	case 8:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(sample_locs_8x));
		radeon_emit_array(cs, sample_locs_8x, ARRAY_SIZE(sample_locs_8x));
		max_dist = max_dist_8x;
		break;
	}

	if (nr_samples > 1) {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
			    S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
			    S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
				       EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1) |
				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
	} else {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
	}
}
static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_atom *atom,
				       int immed_id_base, int res_id_base, int offset, uint32_t pkt_flags)
{
	struct r600_image_state *state = (struct r600_image_state *)atom;
	struct pipe_framebuffer_state *fb_state = &rctx->framebuffer.state;
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_texture *rtex;
	struct r600_resource *resource;
	int i;

	for (i = 0; i < R600_MAX_IMAGES; i++) {
		struct r600_image_view *image = &state->views[i];
		unsigned reloc, immed_reloc;
		int idx = i + offset;

		if (!pkt_flags)
			idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
		if (!image->base.resource)
			continue;

		resource = (struct r600_resource *)image->base.resource;
		if (resource->b.b.target != PIPE_BUFFER)
			rtex = (struct r600_texture *)image->base.resource;
		else
			rtex = NULL;

		reloc = radeon_add_to_buffer_list(&rctx->b,
						  &rctx->b.gfx,
						  resource,
						  RADEON_USAGE_READWRITE,
						  RADEON_PRIO_SHADER_RW_BUFFER);

		immed_reloc = radeon_add_to_buffer_list(&rctx->b,
							&rctx->b.gfx,
							resource->immed_buffer,
							RADEON_USAGE_READWRITE,
							RADEON_PRIO_SHADER_RW_BUFFER);

		if (pkt_flags)
			radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
		else
			radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);

		radeon_emit(cs, image->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, image->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		radeon_emit(cs, image->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		radeon_emit(cs, image->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		radeon_emit(cs, image->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		radeon_emit(cs, image->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, image->cb_color_dim);	/* R_028C78_CB_COLOR0_DIM */
		radeon_emit(cs, rtex ? rtex->cmask.base_address_reg : image->cb_color_base);	/* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, rtex ? rtex->cmask.slice_tile_max : 0);	/* R_028C80_CB_COLOR0_CMASK_SLICE */
		radeon_emit(cs, image->cb_color_fmask);	/* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, image->cb_color_fmask_slice);	/* R_028C88_CB_COLOR0_FMASK_SLICE */
		radeon_emit(cs, rtex ? rtex->color_clear_value[0] : 0);	/* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
		radeon_emit(cs, rtex ? rtex->color_clear_value[1] : 0);	/* R_028C90_CB_COLOR0_CLEAR_WORD1 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, reloc);

		if (pkt_flags)
			radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
		else
			radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /**/
		radeon_emit(cs, immed_reloc);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (immed_id_base + i + offset) * 8);
		radeon_emit_array(cs, image->immed_resource_words, 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, immed_reloc);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (res_id_base + i + offset) * 8);
		radeon_emit_array(cs, image->resource_words, 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, reloc);

		if (!image->skip_mip_address_reloc) {
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
			radeon_emit(cs, reloc);
		}
	}
}
static void evergreen_emit_fragment_image_state(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_image_state(rctx, atom,
				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   R600_IMAGE_REAL_RESOURCE_OFFSET, 0, 0);
}

static void evergreen_emit_compute_image_state(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_image_state(rctx, atom,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
				   0, RADEON_CP_PACKET3_COMPUTE_MODE);
}
static void evergreen_emit_fragment_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	int offset = util_bitcount(rctx->fragment_images.enabled_mask);
	evergreen_emit_image_state(rctx, atom,
				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   R600_IMAGE_REAL_RESOURCE_OFFSET, offset, 0);
}

static void evergreen_emit_compute_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	int offset = util_bitcount(rctx->compute_images.enabled_mask);
	evergreen_emit_image_state(rctx, atom,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
				   offset, RADEON_CP_PACKET3_COMPUTE_MODE);
}
1835 static void evergreen_emit_framebuffer_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
1837 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
1838 struct pipe_framebuffer_state
*state
= &rctx
->framebuffer
.state
;
1839 unsigned nr_cbufs
= state
->nr_cbufs
;
1841 struct r600_texture
*tex
= NULL
;
1842 struct r600_surface
*cb
= NULL
;
1844 /* XXX support more colorbuffers once we need them */
1845 assert(nr_cbufs
<= 8);
1850 for (i
= 0; i
< nr_cbufs
; i
++) {
1851 unsigned reloc
, cmask_reloc
;
1853 cb
= (struct r600_surface
*)state
->cbufs
[i
];
1855 radeon_set_context_reg(cs
, R_028C70_CB_COLOR0_INFO
+ i
* 0x3C,
1856 S_028C70_FORMAT(V_028C70_COLOR_INVALID
));
1860 tex
= (struct r600_texture
*)cb
->base
.texture
;
1861 reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1863 (struct r600_resource
*)cb
->base
.texture
,
1864 RADEON_USAGE_READWRITE
,
1865 tex
->resource
.b
.b
.nr_samples
> 1 ?
1866 RADEON_PRIO_COLOR_BUFFER_MSAA
:
1867 RADEON_PRIO_COLOR_BUFFER
);
1869 if (tex
->cmask_buffer
&& tex
->cmask_buffer
!= &tex
->resource
) {
1870 cmask_reloc
= radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
,
1871 tex
->cmask_buffer
, RADEON_USAGE_READWRITE
,
1872 RADEON_PRIO_SEPARATE_META
);
1874 cmask_reloc
= reloc
;
1877 radeon_set_context_reg_seq(cs
, R_028C60_CB_COLOR0_BASE
+ i
* 0x3C, 13);
1878 radeon_emit(cs
, cb
->cb_color_base
); /* R_028C60_CB_COLOR0_BASE */
1879 radeon_emit(cs
, cb
->cb_color_pitch
); /* R_028C64_CB_COLOR0_PITCH */
1880 radeon_emit(cs
, cb
->cb_color_slice
); /* R_028C68_CB_COLOR0_SLICE */
1881 radeon_emit(cs
, cb
->cb_color_view
); /* R_028C6C_CB_COLOR0_VIEW */
1882 radeon_emit(cs
, cb
->cb_color_info
| tex
->cb_color_info
); /* R_028C70_CB_COLOR0_INFO */
1883 radeon_emit(cs
, cb
->cb_color_attrib
); /* R_028C74_CB_COLOR0_ATTRIB */
1884 radeon_emit(cs
, cb
->cb_color_dim
); /* R_028C78_CB_COLOR0_DIM */
1885 radeon_emit(cs
, tex
->cmask
.base_address_reg
); /* R_028C7C_CB_COLOR0_CMASK */
1886 radeon_emit(cs
, tex
->cmask
.slice_tile_max
); /* R_028C80_CB_COLOR0_CMASK_SLICE */
1887 radeon_emit(cs
, cb
->cb_color_fmask
); /* R_028C84_CB_COLOR0_FMASK */
1888 radeon_emit(cs
, cb
->cb_color_fmask_slice
); /* R_028C88_CB_COLOR0_FMASK_SLICE */
1889 radeon_emit(cs
, tex
->color_clear_value
[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
1890 radeon_emit(cs
, tex
->color_clear_value
[1]); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */
1892 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
1893 radeon_emit(cs
, reloc
);
1895 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
1896 radeon_emit(cs
, reloc
);
1898 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
1899 radeon_emit(cs
, cmask_reloc
);
1901 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
1902 radeon_emit(cs
, reloc
);
1904 /* set CB_COLOR1_INFO for possible dual-src blending */
1905 if (rctx
->framebuffer
.dual_src_blend
&& i
== 1 && state
->cbufs
[0]) {
1906 radeon_set_context_reg(cs
, R_028C70_CB_COLOR0_INFO
+ 1 * 0x3C,
1907 cb
->cb_color_info
| tex
->cb_color_info
);
1910 i
+= util_bitcount(rctx
->fragment_images
.enabled_mask
);
1911 i
+= util_bitcount(rctx
->fragment_buffers
.enabled_mask
);
1913 radeon_set_context_reg(cs
, R_028C70_CB_COLOR0_INFO
+ i
* 0x3C, 0);
1915 radeon_set_context_reg(cs
, R_028E50_CB_COLOR8_INFO
+ (i
- 8) * 0x1C, 0);
1919 struct r600_surface
*zb
= (struct r600_surface
*)state
->zsbuf
;
1920 unsigned reloc
= radeon_add_to_buffer_list(&rctx
->b
,
1922 (struct r600_resource
*)state
->zsbuf
->texture
,
1923 RADEON_USAGE_READWRITE
,
1924 zb
->base
.texture
->nr_samples
> 1 ?
1925 RADEON_PRIO_DEPTH_BUFFER_MSAA
:
1926 RADEON_PRIO_DEPTH_BUFFER
);
1928 radeon_set_context_reg(cs
, R_028008_DB_DEPTH_VIEW
, zb
->db_depth_view
);
1930 radeon_set_context_reg_seq(cs
, R_028040_DB_Z_INFO
, 8);
1931 radeon_emit(cs
, zb
->db_z_info
); /* R_028040_DB_Z_INFO */
1932 radeon_emit(cs
, zb
->db_stencil_info
); /* R_028044_DB_STENCIL_INFO */
1933 radeon_emit(cs
, zb
->db_depth_base
); /* R_028048_DB_Z_READ_BASE */
1934 radeon_emit(cs
, zb
->db_stencil_base
); /* R_02804C_DB_STENCIL_READ_BASE */
1935 radeon_emit(cs
, zb
->db_depth_base
); /* R_028050_DB_Z_WRITE_BASE */
1936 radeon_emit(cs
, zb
->db_stencil_base
); /* R_028054_DB_STENCIL_WRITE_BASE */
1937 radeon_emit(cs
, zb
->db_depth_size
); /* R_028058_DB_DEPTH_SIZE */
1938 radeon_emit(cs
, zb
->db_depth_slice
); /* R_02805C_DB_DEPTH_SLICE */
1940 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028048_DB_Z_READ_BASE */
1941 radeon_emit(cs
, reloc
);
1943 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */
1944 radeon_emit(cs
, reloc
);
1946 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */
1947 radeon_emit(cs
, reloc
);
1949 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */
1950 radeon_emit(cs
, reloc
);
1951 } else if (rctx
->screen
->b
.info
.drm_minor
>= 18) {
1952 /* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
1953 * Older kernels are out of luck. */
1954 radeon_set_context_reg_seq(cs
, R_028040_DB_Z_INFO
, 2);
1955 radeon_emit(cs
, S_028040_FORMAT(V_028040_Z_INVALID
)); /* R_028040_DB_Z_INFO */
1956 radeon_emit(cs
, S_028044_FORMAT(V_028044_STENCIL_INVALID
)); /* R_028044_DB_STENCIL_INFO */
1959 /* Framebuffer dimensions. */
1960 evergreen_get_scissor_rect(rctx
, 0, 0, state
->width
, state
->height
, &tl
, &br
);
1962 radeon_set_context_reg_seq(cs
, R_028204_PA_SC_WINDOW_SCISSOR_TL
, 2);
1963 radeon_emit(cs
, tl
); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
1964 radeon_emit(cs
, br
); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
1966 if (rctx
->b
.chip_class
== EVERGREEN
) {
1967 evergreen_emit_msaa_state(rctx
, rctx
->framebuffer
.nr_samples
, rctx
->ps_iter_samples
);
1969 cayman_emit_msaa_state(cs
, rctx
->framebuffer
.nr_samples
,
1970 rctx
->ps_iter_samples
, 0);
static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state *)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;
	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;

	if (!state->offset_units_unscaled) {
		switch (state->zs_format) {
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			offset_units *= 2.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
			break;
		case PIPE_FORMAT_Z16_UNORM:
			offset_units *= 4.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
			break;
		default:
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
				S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		}
	}

	radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));

	radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       pa_su_poly_offset_db_fmt_cntl);
}
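/* Comment added for clarity (inferred from the callers): image and buffer
 * RATs are bound after the colorbuffers, so the helper below builds their
 * 4-bit per-target write masks and shifts the result past nr_cbufs before
 * it is OR'd into CB_TARGET_MASK.
 */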
uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
				      unsigned nr_cbufs)
{
	unsigned base_mask = 0;
	unsigned dirty_mask = a->image_rat_enabled_mask;
	while (dirty_mask) {
		unsigned idx = u_bit_scan(&dirty_mask);
		base_mask |= (0xf << (idx * 4));
	}
	unsigned offset = util_last_bit(a->image_rat_enabled_mask);
	dirty_mask = a->buffer_rat_enabled_mask;
	while (dirty_mask) {
		unsigned idx = u_bit_scan(&dirty_mask);
		base_mask |= (0xf << (idx + offset) * 4);
	}
	return base_mask << (nr_cbufs * 4);
}
static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_cb_misc_state *a = (struct r600_cb_misc_state *)atom;
	unsigned fb_colormask = a->bound_cbufs_target_mask;
	unsigned ps_colormask = a->ps_color_export_mask;
	unsigned rat_colormask = evergreen_construct_rat_mask(rctx, a, a->nr_cbufs);
	radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
	radeon_emit(cs, (a->blend_colormask & fb_colormask) | rat_colormask); /* R_028238_CB_TARGET_MASK */
	/* This must match the used export instructions exactly.
	 * Other values may lead to undefined behavior and hangs.
	 */
	radeon_emit(cs, ps_colormask); /* R_02823C_CB_SHADER_MASK */
}
static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_db_state *a = (struct r600_db_state *)atom;

	if (a->rsurf && a->rsurf->db_htile_surface) {
		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
		unsigned reloc_idx;

		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
						      RADEON_USAGE_READWRITE, RADEON_PRIO_SEPARATE_META);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc_idx);
	} else {
		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
	}
}
2070 static void evergreen_emit_db_misc_state(struct r600_context
*rctx
, struct r600_atom
*atom
)
2072 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2073 struct r600_db_misc_state
*a
= (struct r600_db_misc_state
*)atom
;
2074 unsigned db_render_control
= 0;
2075 unsigned db_count_control
= 0;
2076 unsigned db_render_override
=
2077 S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE
) |
2078 S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE
);
2080 if (rctx
->b
.num_occlusion_queries
> 0 &&
2081 !a
->occlusion_queries_disabled
) {
2082 db_count_control
|= S_028004_PERFECT_ZPASS_COUNTS(1);
2083 if (rctx
->b
.chip_class
== CAYMAN
) {
2084 db_count_control
|= S_028004_SAMPLE_RATE(a
->log_samples
);
2086 db_render_override
|= S_02800C_NOOP_CULL_DISABLE(1);
2088 db_count_control
|= S_028004_ZPASS_INCREMENT_DISABLE(1);
2091 /* This is to fix a lockup when hyperz and alpha test are enabled at
2092 * the same time somehow GPU get confuse on which order to pick for
2095 if (rctx
->alphatest_state
.sx_alpha_test_control
)
2096 db_render_override
|= S_02800C_FORCE_SHADER_Z_ORDER(1);
2098 if (a
->flush_depthstencil_through_cb
) {
2099 assert(a
->copy_depth
|| a
->copy_stencil
);
2101 db_render_control
|= S_028000_DEPTH_COPY_ENABLE(a
->copy_depth
) |
2102 S_028000_STENCIL_COPY_ENABLE(a
->copy_stencil
) |
2103 S_028000_COPY_CENTROID(1) |
2104 S_028000_COPY_SAMPLE(a
->copy_sample
);
2105 } else if (a
->flush_depth_inplace
|| a
->flush_stencil_inplace
) {
2106 db_render_control
|= S_028000_DEPTH_COMPRESS_DISABLE(a
->flush_depth_inplace
) |
2107 S_028000_STENCIL_COMPRESS_DISABLE(a
->flush_stencil_inplace
);
2108 db_render_override
|= S_02800C_DISABLE_PIXEL_RATE_TILES(1);
2110 if (a
->htile_clear
) {
2111 /* FIXME we might want to disable cliprect here */
2112 db_render_control
|= S_028000_DEPTH_CLEAR_ENABLE(1);
2115 radeon_set_context_reg_seq(cs
, R_028000_DB_RENDER_CONTROL
, 2);
2116 radeon_emit(cs
, db_render_control
); /* R_028000_DB_RENDER_CONTROL */
2117 radeon_emit(cs
, db_count_control
); /* R_028004_DB_COUNT_CONTROL */
2118 radeon_set_context_reg(cs
, R_02800C_DB_RENDER_OVERRIDE
, db_render_override
);
2119 radeon_set_context_reg(cs
, R_02880C_DB_SHADER_CONTROL
, a
->db_shader_control
);
2122 static void evergreen_emit_vertex_buffers(struct r600_context
*rctx
,
2123 struct r600_vertexbuf_state
*state
,
2124 unsigned resource_offset
,
2127 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2128 uint32_t dirty_mask
= state
->dirty_mask
;
2130 while (dirty_mask
) {
2131 struct pipe_vertex_buffer
*vb
;
2132 struct r600_resource
*rbuffer
;
2134 unsigned buffer_index
= u_bit_scan(&dirty_mask
);
2136 vb
= &state
->vb
[buffer_index
];
2137 rbuffer
= (struct r600_resource
*)vb
->buffer
.resource
;
2140 va
= rbuffer
->gpu_address
+ vb
->buffer_offset
;
2142 /* fetch resources start at index 992 */
2143 radeon_emit(cs
, PKT3(PKT3_SET_RESOURCE
, 8, 0) | pkt_flags
);
2144 radeon_emit(cs
, (resource_offset
+ buffer_index
) * 8);
2145 radeon_emit(cs
, va
); /* RESOURCEi_WORD0 */
2146 radeon_emit(cs
, rbuffer
->b
.b
.width0
- vb
->buffer_offset
- 1); /* RESOURCEi_WORD1 */
2147 radeon_emit(cs
, /* RESOURCEi_WORD2 */
2148 S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
2149 S_030008_STRIDE(vb
->stride
) |
2150 S_030008_BASE_ADDRESS_HI(va
>> 32UL));
2151 radeon_emit(cs
, /* RESOURCEi_WORD3 */
2152 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X
) |
2153 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y
) |
2154 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z
) |
2155 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W
));
2156 radeon_emit(cs
, 0); /* RESOURCEi_WORD4 */
2157 radeon_emit(cs
, 0); /* RESOURCEi_WORD5 */
2158 radeon_emit(cs
, 0); /* RESOURCEi_WORD6 */
2159 radeon_emit(cs
, 0xc0000000); /* RESOURCEi_WORD7 */
2161 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0) | pkt_flags
);
2162 radeon_emit(cs
, radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rbuffer
,
2163 RADEON_USAGE_READ
, RADEON_PRIO_VERTEX_BUFFER
));
2165 state
->dirty_mask
= 0;
2168 static void evergreen_fs_emit_vertex_buffers(struct r600_context
*rctx
, struct r600_atom
* atom
)
2170 evergreen_emit_vertex_buffers(rctx
, &rctx
->vertex_buffer_state
, EG_FETCH_CONSTANTS_OFFSET_FS
, 0);
2173 static void evergreen_cs_emit_vertex_buffers(struct r600_context
*rctx
, struct r600_atom
* atom
)
2175 evergreen_emit_vertex_buffers(rctx
, &rctx
->cs_vertex_buffer_state
, EG_FETCH_CONSTANTS_OFFSET_CS
,
2176 RADEON_CP_PACKET3_COMPUTE_MODE
);
2179 static void evergreen_emit_constant_buffers(struct r600_context
*rctx
,
2180 struct r600_constbuf_state
*state
,
2181 unsigned buffer_id_base
,
2182 unsigned reg_alu_constbuf_size
,
2183 unsigned reg_alu_const_cache
,
2186 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2187 uint32_t dirty_mask
= state
->dirty_mask
;
2189 while (dirty_mask
) {
2190 struct pipe_constant_buffer
*cb
;
2191 struct r600_resource
*rbuffer
;
2193 unsigned buffer_index
= ffs(dirty_mask
) - 1;
2194 unsigned gs_ring_buffer
= (buffer_index
== R600_GS_RING_CONST_BUFFER
);
2196 cb
= &state
->cb
[buffer_index
];
2197 rbuffer
= (struct r600_resource
*)cb
->buffer
;
2200 va
= rbuffer
->gpu_address
+ cb
->buffer_offset
;
2202 if (buffer_index
< R600_MAX_HW_CONST_BUFFERS
) {
2203 radeon_set_context_reg_flag(cs
, reg_alu_constbuf_size
+ buffer_index
* 4,
2204 DIV_ROUND_UP(cb
->buffer_size
, 256), pkt_flags
);
2205 radeon_set_context_reg_flag(cs
, reg_alu_const_cache
+ buffer_index
* 4, va
>> 8,
2207 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0) | pkt_flags
);
2208 radeon_emit(cs
, radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rbuffer
,
2209 RADEON_USAGE_READ
, RADEON_PRIO_CONST_BUFFER
));
2212 radeon_emit(cs
, PKT3(PKT3_SET_RESOURCE
, 8, 0) | pkt_flags
);
2213 radeon_emit(cs
, (buffer_id_base
+ buffer_index
) * 8);
2214 radeon_emit(cs
, va
); /* RESOURCEi_WORD0 */
2215 radeon_emit(cs
, cb
->buffer_size
-1); /* RESOURCEi_WORD1 */
2216 radeon_emit(cs
, /* RESOURCEi_WORD2 */
2217 S_030008_ENDIAN_SWAP(gs_ring_buffer
? ENDIAN_NONE
: r600_endian_swap(32)) |
2218 S_030008_STRIDE(gs_ring_buffer
? 4 : 16) |
2219 S_030008_BASE_ADDRESS_HI(va
>> 32UL) |
2220 S_030008_DATA_FORMAT(FMT_32_32_32_32_FLOAT
));
2221 radeon_emit(cs
, /* RESOURCEi_WORD3 */
2222 S_03000C_UNCACHED(gs_ring_buffer
? 1 : 0) |
2223 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X
) |
2224 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y
) |
2225 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z
) |
2226 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W
));
2227 radeon_emit(cs
, 0); /* RESOURCEi_WORD4 */
2228 radeon_emit(cs
, 0); /* RESOURCEi_WORD5 */
2229 radeon_emit(cs
, 0); /* RESOURCEi_WORD6 */
2230 radeon_emit(cs
, /* RESOURCEi_WORD7 */
2231 S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER
));
2233 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0) | pkt_flags
);
2234 radeon_emit(cs
, radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rbuffer
,
2235 RADEON_USAGE_READ
, RADEON_PRIO_CONST_BUFFER
));
2237 dirty_mask
&= ~(1 << buffer_index
);
2239 state
->dirty_mask
= 0;
2242 /* VS constants can be in VS/ES (same space) or LS if tess is enabled */
2243 static void evergreen_emit_vs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2245 if (rctx
->vs_shader
->current
->shader
.vs_as_ls
) {
2246 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_VERTEX
],
2247 EG_FETCH_CONSTANTS_OFFSET_LS
,
2248 R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0
,
2249 R_028F40_ALU_CONST_CACHE_LS_0
,
2250 0 /* PKT3 flags */);
2252 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_VERTEX
],
2253 EG_FETCH_CONSTANTS_OFFSET_VS
,
2254 R_028180_ALU_CONST_BUFFER_SIZE_VS_0
,
2255 R_028980_ALU_CONST_CACHE_VS_0
,
2256 0 /* PKT3 flags */);
2260 static void evergreen_emit_gs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2262 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_GEOMETRY
],
2263 EG_FETCH_CONSTANTS_OFFSET_GS
,
2264 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0
,
2265 R_0289C0_ALU_CONST_CACHE_GS_0
,
2266 0 /* PKT3 flags */);
2269 static void evergreen_emit_ps_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2271 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_FRAGMENT
],
2272 EG_FETCH_CONSTANTS_OFFSET_PS
,
2273 R_028140_ALU_CONST_BUFFER_SIZE_PS_0
,
2274 R_028940_ALU_CONST_CACHE_PS_0
,
2275 0 /* PKT3 flags */);
2278 static void evergreen_emit_cs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2280 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_COMPUTE
],
2281 EG_FETCH_CONSTANTS_OFFSET_CS
,
2282 R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0
,
2283 R_028F40_ALU_CONST_CACHE_LS_0
,
2284 RADEON_CP_PACKET3_COMPUTE_MODE
);
2287 /* tes constants can be emitted to VS or ES - which are common */
2288 static void evergreen_emit_tes_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2290 if (!rctx
->tes_shader
)
2292 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_TESS_EVAL
],
2293 EG_FETCH_CONSTANTS_OFFSET_VS
,
2294 R_028180_ALU_CONST_BUFFER_SIZE_VS_0
,
2295 R_028980_ALU_CONST_CACHE_VS_0
,
2299 static void evergreen_emit_tcs_constant_buffers(struct r600_context
*rctx
, struct r600_atom
*atom
)
2301 if (!rctx
->tes_shader
)
2303 evergreen_emit_constant_buffers(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_TESS_CTRL
],
2304 EG_FETCH_CONSTANTS_OFFSET_HS
,
2305 R_028F80_ALU_CONST_BUFFER_SIZE_HS_0
,
2306 R_028F00_ALU_CONST_CACHE_HS_0
,
void evergreen_setup_scratch_buffers(struct r600_context *rctx) {
	static const struct {
		unsigned ring_base;
		unsigned item_size;
		unsigned ring_size;
	} regs[EG_NUM_HW_STAGES] = {
		[R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_028914_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
		[R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_028910_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
		[R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_02890C_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
		[R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_028908_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE },
		[EG_HW_STAGE_LS] = { R_008E10_SQ_LSTMP_RING_BASE, R_028830_SQ_LSTMP_RING_ITEMSIZE, R_008E14_SQ_LSTMP_RING_SIZE },
		[EG_HW_STAGE_HS] = { R_008E18_SQ_HSTMP_RING_BASE, R_028834_SQ_HSTMP_RING_ITEMSIZE, R_008E1C_SQ_HSTMP_RING_SIZE }
	};

	for (unsigned i = 0; i < EG_NUM_HW_STAGES; i++) {
		struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;

		if (stage && unlikely(stage->scratch_space_needed)) {
			r600_setup_scratch_area_for_shader(rctx, stage,
							   &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
		}
	}
}
2334 static void evergreen_emit_sampler_views(struct r600_context
*rctx
,
2335 struct r600_samplerview_state
*state
,
2336 unsigned resource_id_base
, unsigned pkt_flags
)
2338 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2339 uint32_t dirty_mask
= state
->dirty_mask
;
2341 while (dirty_mask
) {
2342 struct r600_pipe_sampler_view
*rview
;
2343 unsigned resource_index
= u_bit_scan(&dirty_mask
);
2346 rview
= state
->views
[resource_index
];
2349 radeon_emit(cs
, PKT3(PKT3_SET_RESOURCE
, 8, 0) | pkt_flags
);
2350 radeon_emit(cs
, (resource_id_base
+ resource_index
) * 8);
2351 radeon_emit_array(cs
, rview
->tex_resource_words
, 8);
2353 reloc
= radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rview
->tex_resource
,
2355 r600_get_sampler_view_priority(rview
->tex_resource
));
2356 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0) | pkt_flags
);
2357 radeon_emit(cs
, reloc
);
2359 if (!rview
->skip_mip_address_reloc
) {
2360 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0) | pkt_flags
);
2361 radeon_emit(cs
, reloc
);
2364 state
->dirty_mask
= 0;
2367 static void evergreen_emit_vs_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2369 if (rctx
->vs_shader
->current
->shader
.vs_as_ls
) {
2370 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
].views
,
2371 EG_FETCH_CONSTANTS_OFFSET_LS
+ R600_MAX_CONST_BUFFERS
, 0);
2373 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
].views
,
2374 EG_FETCH_CONSTANTS_OFFSET_VS
+ R600_MAX_CONST_BUFFERS
, 0);
2378 static void evergreen_emit_gs_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2380 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_GEOMETRY
].views
,
2381 EG_FETCH_CONSTANTS_OFFSET_GS
+ R600_MAX_CONST_BUFFERS
, 0);
2384 static void evergreen_emit_tcs_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2386 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_CTRL
].views
,
2387 EG_FETCH_CONSTANTS_OFFSET_HS
+ R600_MAX_CONST_BUFFERS
, 0);
2390 static void evergreen_emit_tes_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2392 if (!rctx
->tes_shader
)
2394 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_EVAL
].views
,
2395 EG_FETCH_CONSTANTS_OFFSET_VS
+ R600_MAX_CONST_BUFFERS
, 0);
2398 static void evergreen_emit_ps_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2400 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_FRAGMENT
].views
,
2401 EG_FETCH_CONSTANTS_OFFSET_PS
+ R600_MAX_CONST_BUFFERS
, 0);
2404 static void evergreen_emit_cs_sampler_views(struct r600_context
*rctx
, struct r600_atom
*atom
)
2406 evergreen_emit_sampler_views(rctx
, &rctx
->samplers
[PIPE_SHADER_COMPUTE
].views
,
2407 EG_FETCH_CONSTANTS_OFFSET_CS
+ R600_MAX_CONST_BUFFERS
, RADEON_CP_PACKET3_COMPUTE_MODE
);
static void evergreen_convert_border_color(union pipe_color_union *in,
					   union pipe_color_union *out,
					   enum pipe_format format)
{
	if (util_format_is_pure_integer(format) &&
	    !util_format_is_depth_or_stencil(format)) {
		const struct util_format_description *d = util_format_description(format);

		for (int i = 0; i < d->nr_channels; ++i) {
			int cs = d->channel[i].size;
			if (d->channel[i].type == UTIL_FORMAT_TYPE_SIGNED)
				out->f[i] = (double)(in->i[i]) / ((1ul << (cs - 1)) - 1 );
			else if (d->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
				out->f[i] = (double)(in->ui[i]) / ((1ul << cs) - 1 );
			else
				out->f[i] = 0;
		}
	} else {
		switch (format) {
		case PIPE_FORMAT_X24S8_UINT:
		case PIPE_FORMAT_X32_S8X24_UINT:
			out->f[0] = (double)(in->ui[0]) / 255.0;
			out->f[1] = out->f[2] = out->f[3] = 0.0f;
			break;
		default:
			memcpy(out->f, in->f, 4 * sizeof(float));
		}
	}
}
2441 static void evergreen_emit_sampler_states(struct r600_context
*rctx
,
2442 struct r600_textures_info
*texinfo
,
2443 unsigned resource_id_base
,
2444 unsigned border_index_reg
,
2447 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2448 uint32_t dirty_mask
= texinfo
->states
.dirty_mask
;
2449 union pipe_color_union border_color
= {{0,0,0,1}};
2450 union pipe_color_union
*border_color_ptr
= &border_color
;
2452 while (dirty_mask
) {
2453 struct r600_pipe_sampler_state
*rstate
;
2454 unsigned i
= u_bit_scan(&dirty_mask
);
2456 rstate
= texinfo
->states
.states
[i
];
2459 if (rstate
->border_color_use
) {
2460 struct r600_pipe_sampler_view
*rview
= texinfo
->views
.views
[i
];
2462 evergreen_convert_border_color(&rstate
->border_color
,
2463 &border_color
, rview
->base
.format
);
2465 border_color_ptr
= &rstate
->border_color
;
2469 radeon_emit(cs
, PKT3(PKT3_SET_SAMPLER
, 3, 0) | pkt_flags
);
2470 radeon_emit(cs
, (resource_id_base
+ i
) * 3);
2471 radeon_emit_array(cs
, rstate
->tex_sampler_words
, 3);
2473 if (rstate
->border_color_use
) {
2474 radeon_set_config_reg_seq(cs
, border_index_reg
, 5);
2476 radeon_emit_array(cs
, border_color_ptr
->ui
, 4);
2479 texinfo
->states
.dirty_mask
= 0;
2482 static void evergreen_emit_vs_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2484 if (rctx
->vs_shader
->current
->shader
.vs_as_ls
) {
2485 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
], 72,
2486 R_00A450_TD_LS_SAMPLER0_BORDER_COLOR_INDEX
, 0);
2488 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
], 18,
2489 R_00A414_TD_VS_SAMPLER0_BORDER_INDEX
, 0);
2493 static void evergreen_emit_gs_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2495 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_GEOMETRY
], 36,
2496 R_00A428_TD_GS_SAMPLER0_BORDER_INDEX
, 0);
2499 static void evergreen_emit_tcs_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2501 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_CTRL
], 54,
2502 R_00A43C_TD_HS_SAMPLER0_BORDER_COLOR_INDEX
, 0);
2505 static void evergreen_emit_tes_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2507 if (!rctx
->tes_shader
)
2509 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_EVAL
], 18,
2510 R_00A414_TD_VS_SAMPLER0_BORDER_INDEX
, 0);
2513 static void evergreen_emit_ps_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2515 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_FRAGMENT
], 0,
2516 R_00A400_TD_PS_SAMPLER0_BORDER_INDEX
, 0);
2519 static void evergreen_emit_cs_sampler_states(struct r600_context
*rctx
, struct r600_atom
*atom
)
2521 evergreen_emit_sampler_states(rctx
, &rctx
->samplers
[PIPE_SHADER_COMPUTE
], 90,
2522 R_00A464_TD_CS_SAMPLER0_BORDER_INDEX
,
2523 RADEON_CP_PACKET3_COMPUTE_MODE
);
static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask *)a;
	uint8_t mask = s->sample_mask;

	radeon_set_context_reg(rctx->b.gfx.cs, R_028C3C_PA_SC_AA_MASK,
			       mask | (mask << 8) | (mask << 16) | (mask << 24));
}

static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask *)a;
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	uint16_t mask = s->sample_mask;

	radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */
	radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */
}
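/* Comment added for clarity (assumption): r600-class hardware fetches vertex
 * data with a small separate "fetch shader" program, so the atom below only
 * needs to point SQ_PGM_START_FS at the fetch shader's buffer and add that
 * buffer to the relocation list.
 */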
static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_cso_state *state = (struct r600_cso_state *)a;
	struct r600_fetch_shader *shader = (struct r600_fetch_shader *)state->cso;

	if (!shader)
		return;

	radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
			       (shader->buffer->gpu_address + shader->offset) >> 8);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SHADER_BINARY));
}
2563 static void evergreen_emit_shader_stages(struct r600_context
*rctx
, struct r600_atom
*a
)
2565 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2566 struct r600_shader_stages_state
*state
= (struct r600_shader_stages_state
*)a
;
2568 uint32_t v
= 0, v2
= 0, primid
= 0, tf_param
= 0;
2570 if (rctx
->vs_shader
->current
->shader
.vs_as_gs_a
) {
2571 v2
= S_028A40_MODE(V_028A40_GS_SCENARIO_A
);
2575 if (state
->geom_enable
) {
2578 if (rctx
->gs_shader
->gs_max_out_vertices
<= 128)
2579 cut_val
= V_028A40_GS_CUT_128
;
2580 else if (rctx
->gs_shader
->gs_max_out_vertices
<= 256)
2581 cut_val
= V_028A40_GS_CUT_256
;
2582 else if (rctx
->gs_shader
->gs_max_out_vertices
<= 512)
2583 cut_val
= V_028A40_GS_CUT_512
;
2585 cut_val
= V_028A40_GS_CUT_1024
;
2587 v
= S_028B54_GS_EN(1) |
2588 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER
);
2589 if (!rctx
->tes_shader
)
2590 v
|= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL
);
2592 v2
= S_028A40_MODE(V_028A40_GS_SCENARIO_G
) |
2593 S_028A40_CUT_MODE(cut_val
);
2595 if (rctx
->gs_shader
->current
->shader
.gs_prim_id_input
)
2599 if (rctx
->tes_shader
) {
2600 uint32_t type
, partitioning
, topology
;
2601 struct tgsi_shader_info
*info
= &rctx
->tes_shader
->current
->selector
->info
;
2602 unsigned tes_prim_mode
= info
->properties
[TGSI_PROPERTY_TES_PRIM_MODE
];
2603 unsigned tes_spacing
= info
->properties
[TGSI_PROPERTY_TES_SPACING
];
2604 bool tes_vertex_order_cw
= info
->properties
[TGSI_PROPERTY_TES_VERTEX_ORDER_CW
];
2605 bool tes_point_mode
= info
->properties
[TGSI_PROPERTY_TES_POINT_MODE
];
2606 switch (tes_prim_mode
) {
2607 case PIPE_PRIM_LINES
:
2608 type
= V_028B6C_TESS_ISOLINE
;
2610 case PIPE_PRIM_TRIANGLES
:
2611 type
= V_028B6C_TESS_TRIANGLE
;
2613 case PIPE_PRIM_QUADS
:
2614 type
= V_028B6C_TESS_QUAD
;
2621 switch (tes_spacing
) {
2622 case PIPE_TESS_SPACING_FRACTIONAL_ODD
:
2623 partitioning
= V_028B6C_PART_FRAC_ODD
;
2625 case PIPE_TESS_SPACING_FRACTIONAL_EVEN
:
2626 partitioning
= V_028B6C_PART_FRAC_EVEN
;
2628 case PIPE_TESS_SPACING_EQUAL
:
2629 partitioning
= V_028B6C_PART_INTEGER
;
2637 topology
= V_028B6C_OUTPUT_POINT
;
2638 else if (tes_prim_mode
== PIPE_PRIM_LINES
)
2639 topology
= V_028B6C_OUTPUT_LINE
;
2640 else if (tes_vertex_order_cw
)
2641 /* XXX follow radeonsi and invert */
2642 topology
= V_028B6C_OUTPUT_TRIANGLE_CCW
;
2644 topology
= V_028B6C_OUTPUT_TRIANGLE_CW
;
2646 tf_param
= S_028B6C_TYPE(type
) |
2647 S_028B6C_PARTITIONING(partitioning
) |
2648 S_028B6C_TOPOLOGY(topology
);
2651 if (rctx
->tes_shader
) {
2652 v
|= S_028B54_LS_EN(V_028B54_LS_STAGE_ON
) |
2654 if (!state
->geom_enable
)
2655 v
|= S_028B54_VS_EN(V_028B54_VS_STAGE_DS
);
2657 v
|= S_028B54_ES_EN(V_028B54_ES_STAGE_DS
);
2660 radeon_set_context_reg(cs
, R_028AB8_VGT_VTX_CNT_EN
, v
? 1 : 0 );
2661 radeon_set_context_reg(cs
, R_028B54_VGT_SHADER_STAGES_EN
, v
);
2662 radeon_set_context_reg(cs
, R_028A40_VGT_GS_MODE
, v2
);
2663 radeon_set_context_reg(cs
, R_028A84_VGT_PRIMITIVEID_EN
, primid
);
2664 radeon_set_context_reg(cs
, R_028B6C_VGT_TF_PARAM
, tf_param
);
2667 static void evergreen_emit_gs_rings(struct r600_context
*rctx
, struct r600_atom
*a
)
2669 struct radeon_cmdbuf
*cs
= rctx
->b
.gfx
.cs
;
2670 struct r600_gs_rings_state
*state
= (struct r600_gs_rings_state
*)a
;
2671 struct r600_resource
*rbuffer
;
2673 radeon_set_config_reg(cs
, R_008040_WAIT_UNTIL
, S_008040_WAIT_3D_IDLE(1));
2674 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2675 radeon_emit(cs
, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH
));
2677 if (state
->enable
) {
2678 rbuffer
=(struct r600_resource
*)state
->esgs_ring
.buffer
;
2679 radeon_set_config_reg(cs
, R_008C40_SQ_ESGS_RING_BASE
,
2680 rbuffer
->gpu_address
>> 8);
2681 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
2682 radeon_emit(cs
, radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rbuffer
,
2683 RADEON_USAGE_READWRITE
,
2684 RADEON_PRIO_SHADER_RINGS
));
2685 radeon_set_config_reg(cs
, R_008C44_SQ_ESGS_RING_SIZE
,
2686 state
->esgs_ring
.buffer_size
>> 8);
2688 rbuffer
=(struct r600_resource
*)state
->gsvs_ring
.buffer
;
2689 radeon_set_config_reg(cs
, R_008C48_SQ_GSVS_RING_BASE
,
2690 rbuffer
->gpu_address
>> 8);
2691 radeon_emit(cs
, PKT3(PKT3_NOP
, 0, 0));
2692 radeon_emit(cs
, radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.gfx
, rbuffer
,
2693 RADEON_USAGE_READWRITE
,
2694 RADEON_PRIO_SHADER_RINGS
));
2695 radeon_set_config_reg(cs
, R_008C4C_SQ_GSVS_RING_SIZE
,
2696 state
->gsvs_ring
.buffer_size
>> 8);
2698 radeon_set_config_reg(cs
, R_008C44_SQ_ESGS_RING_SIZE
, 0);
2699 radeon_set_config_reg(cs
, R_008C4C_SQ_GSVS_RING_SIZE
, 0);
2702 radeon_set_config_reg(cs
, R_008040_WAIT_UNTIL
, S_008040_WAIT_3D_IDLE(1));
2703 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2704 radeon_emit(cs
, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH
));
2707 void cayman_init_common_regs(struct r600_command_buffer
*cb
,
2708 enum chip_class ctx_chip_class
,
2709 enum radeon_family ctx_family
,
2712 r600_store_config_reg_seq(cb
, R_008C00_SQ_CONFIG
, 2);
2713 r600_store_value(cb
, S_008C00_EXPORT_SRC_C(1)); /* R_008C00_SQ_CONFIG */
2714 /* always set the temp clauses */
2715 r600_store_value(cb
, S_008C04_NUM_CLAUSE_TEMP_GPRS(4)); /* R_008C04_SQ_GPR_RESOURCE_MGMT_1 */
2717 r600_store_config_reg_seq(cb
, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1
, 2);
2718 r600_store_value(cb
, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
2719 r600_store_value(cb
, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */
2721 r600_store_config_reg(cb
, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
, (1 << 8));
2723 r600_store_context_reg_seq(cb
, R_028350_SX_MISC
, 2);
2724 r600_store_value(cb
, 0);
2725 r600_store_value(cb
, S_028354_SURFACE_SYNC_MASK(0xf));
2727 r600_store_context_reg(cb
, R_028800_DB_DEPTH_CONTROL
, 0);
2730 static void cayman_init_atom_start_cs(struct r600_context
*rctx
)
2732 struct r600_command_buffer
*cb
= &rctx
->start_cs_cmd
;
2735 r600_init_command_buffer(cb
, 338);
2737 /* This must be first. */
2738 r600_store_value(cb
, PKT3(PKT3_CONTEXT_CONTROL
, 1, 0));
2739 r600_store_value(cb
, 0x80000000);
2740 r600_store_value(cb
, 0x80000000);
2742 /* We're setting config registers here. */
2743 r600_store_value(cb
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2744 r600_store_value(cb
, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
2746 /* This enables pipeline stat & streamout queries.
2747 * They are only disabled by blits.
2749 r600_store_value(cb
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2750 r600_store_value(cb
, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START
) | EVENT_INDEX(0));
2752 cayman_init_common_regs(cb
, rctx
->b
.chip_class
,
2753 rctx
->b
.family
, rctx
->screen
->b
.info
.drm_minor
);
2755 r600_store_config_reg(cb
, R_009100_SPI_CONFIG_CNTL
, 0);
2756 r600_store_config_reg(cb
, R_00913C_SPI_CONFIG_CNTL_1
, S_00913C_VTX_DONE_DELAY(4));
2758 /* remove LS/HS from one SIMD for hw workaround */
2759 r600_store_config_reg_seq(cb
, R_008E20_SQ_STATIC_THREAD_MGMT1
, 3);
2760 r600_store_value(cb
, 0xffffffff);
2761 r600_store_value(cb
, 0xffffffff);
2762 r600_store_value(cb
, 0xfffffffe);
2764 r600_store_context_reg_seq(cb
, R_028900_SQ_ESGS_RING_ITEMSIZE
, 6);
2765 r600_store_value(cb
, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
2766 r600_store_value(cb
, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
2767 r600_store_value(cb
, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
2768 r600_store_value(cb
, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
2769 r600_store_value(cb
, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
2770 r600_store_value(cb
, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */
2772 r600_store_context_reg_seq(cb
, R_02891C_SQ_GS_VERT_ITEMSIZE
, 4);
2773 r600_store_value(cb
, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
2774 r600_store_value(cb
, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
2775 r600_store_value(cb
, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
2776 r600_store_value(cb
, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */
2778 r600_store_context_reg_seq(cb
, R_028A10_VGT_OUTPUT_PATH_CNTL
, 13);
2779 r600_store_value(cb
, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
2780 r600_store_value(cb
, 0); /* R_028A14_VGT_HOS_CNTL */
2781 r600_store_value(cb
, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
2782 r600_store_value(cb
, fui(0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
2783 r600_store_value(cb
, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
2784 r600_store_value(cb
, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
2785 r600_store_value(cb
, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
2786 r600_store_value(cb
, 0); /* R_028A2C_VGT_GROUP_DECR */
2787 r600_store_value(cb
, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
2788 r600_store_value(cb
, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
2789 r600_store_value(cb
, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
2790 r600_store_value(cb
, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
2791 r600_store_value(cb
, 0); /* R_028A40_VGT_GS_MODE */
2793 r600_store_context_reg(cb
, R_028B98_VGT_STRMOUT_BUFFER_CONFIG
, 0);
2795 r600_store_config_reg(cb
, R_008A14_PA_CL_ENHANCE
, (3 << 1) | 1);
2797 r600_store_context_reg_seq(cb
, CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0
, 2);
2798 r600_store_value(cb
, 0x76543210); /* CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0 */
2799 r600_store_value(cb
, 0xfedcba98); /* CM_R_028BD8_PA_SC_CENTROID_PRIORITY_1 */
2801 r600_store_context_reg(cb
, R_028724_GDS_ADDR_SIZE
, 0x3fff);
2802 r600_store_context_reg_seq(cb
, R_0288E8_SQ_LDS_ALLOC
, 2);
2803 r600_store_value(cb
, 0); /* R_0288E8_SQ_LDS_ALLOC */
2804 r600_store_value(cb
, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */
2806 r600_store_context_reg(cb
, R_0288F0_SQ_VTX_SEMANTIC_CLEAR
, ~0);
2808 r600_store_context_reg_seq(cb
, R_028400_VGT_MAX_VTX_INDX
, 2);
2809 r600_store_value(cb
, ~0); /* R_028400_VGT_MAX_VTX_INDX */
2810 r600_store_value(cb
, 0); /* R_028404_VGT_MIN_VTX_INDX */
2812 r600_store_ctl_const(cb
, R_03CFF0_SQ_VTX_BASE_VTX_LOC
, 0);
2814 r600_store_context_reg(cb
, R_028028_DB_STENCIL_CLEAR
, 0);
2816 r600_store_context_reg(cb
, R_0286DC_SPI_FOG_CNTL
, 0);
2818 r600_store_context_reg_seq(cb
, R_028AC0_DB_SRESULTS_COMPARE_STATE0
, 3);
2819 r600_store_value(cb
, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
2820 r600_store_value(cb
, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
2821 r600_store_value(cb
, 0); /* R_028AC8_DB_PRELOAD_CONTROL */
2823 r600_store_context_reg(cb
, R_028200_PA_SC_WINDOW_OFFSET
, 0);
2824 r600_store_context_reg(cb
, R_02820C_PA_SC_CLIPRECT_RULE
, 0xFFFF);
2826 r600_store_context_reg(cb
, R_028230_PA_SC_EDGERULE
, 0xAAAAAAAA);
2827 r600_store_context_reg(cb
, R_028820_PA_CL_NANINF_CNTL
, 0);
2829 r600_store_context_reg_seq(cb
, R_028240_PA_SC_GENERIC_SCISSOR_TL
, 2);
2830 r600_store_value(cb
, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
2831 r600_store_value(cb
, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
2833 r600_store_context_reg_seq(cb
, R_028030_PA_SC_SCREEN_SCISSOR_TL
, 2);
2834 r600_store_value(cb
, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
2835 r600_store_value(cb
, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */
2837 r600_store_context_reg(cb
, R_028848_SQ_PGM_RESOURCES_2_PS
, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2838 r600_store_context_reg(cb
, R_028864_SQ_PGM_RESOURCES_2_VS
, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2839 r600_store_context_reg(cb
, R_02887C_SQ_PGM_RESOURCES_2_GS
, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2840 r600_store_context_reg(cb
, R_028894_SQ_PGM_RESOURCES_2_ES
, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2841 r600_store_context_reg(cb
, R_0288C0_SQ_PGM_RESOURCES_2_HS
, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2842 r600_store_context_reg(cb
, R_0288D8_SQ_PGM_RESOURCES_2_LS
, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN
));
2844 r600_store_context_reg(cb
, R_0288A8_SQ_PGM_RESOURCES_FS
, 0);
2846 /* to avoid GPU doing any preloading of constant from random address */
2847 r600_store_context_reg_seq(cb
, R_028140_ALU_CONST_BUFFER_SIZE_PS_0
, 16);
2848 for (i
= 0; i
< 16; i
++)
2849 r600_store_value(cb
, 0);
2851 r600_store_context_reg_seq(cb
, R_028180_ALU_CONST_BUFFER_SIZE_VS_0
, 16);
2852 for (i
= 0; i
< 16; i
++)
2853 r600_store_value(cb
, 0);
2855 r600_store_context_reg_seq(cb
, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0
, 16);
2856 for (i
= 0; i
< 16; i
++)
2857 r600_store_value(cb
, 0);
2859 r600_store_context_reg_seq(cb
, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0
, 16);
2860 for (i
= 0; i
< 16; i
++)
2861 r600_store_value(cb
, 0);
2863 r600_store_context_reg_seq(cb
, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0
, 16);
2864 for (i
= 0; i
< 16; i
++)
2865 r600_store_value(cb
, 0);
2867 if (rctx
->screen
->b
.has_streamout
) {
2868 r600_store_context_reg(cb
, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET
, 0);
2871 r600_store_context_reg(cb
, R_028010_DB_RENDER_OVERRIDE2
, 0);
2872 r600_store_context_reg(cb
, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET
, 0);
2873 r600_store_context_reg(cb
, R_0286C8_SPI_THREAD_GROUPING
, 0);
2874 r600_store_context_reg_seq(cb
, R_0286E4_SPI_PS_IN_CONTROL_2
, 2);
2875 r600_store_value(cb
, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
2876 r600_store_value(cb
, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */
2878 r600_store_context_reg_seq(cb
, R_028B54_VGT_SHADER_STAGES_EN
, 2);
2879 r600_store_value(cb
, 0); /* R028B54_VGT_SHADER_STAGES_EN */
2880 r600_store_value(cb
, 0); /* R028B58_VGT_LS_HS_CONFIG */
2881 r600_store_context_reg(cb
, R_028B6C_VGT_TF_PARAM
, 0);
2882 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
, 0x01000FFF);
2883 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
+ (32 * 4), 0x01000FFF);
2884 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
+ (64 * 4), 0x01000FFF);
2885 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
+ (96 * 4), 0x01000FFF);
2886 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
+ (128 * 4), 0x01000FFF);
2889 void evergreen_init_common_regs(struct r600_context
*rctx
, struct r600_command_buffer
*cb
,
2890 enum chip_class ctx_chip_class
,
2891 enum radeon_family ctx_family
,
2913 rctx
->default_gprs
[R600_HW_STAGE_PS
] = 93;
2914 rctx
->default_gprs
[R600_HW_STAGE_VS
] = 46;
2915 rctx
->r6xx_num_clause_temp_gprs
= 4;
2916 rctx
->default_gprs
[R600_HW_STAGE_GS
] = 31;
2917 rctx
->default_gprs
[R600_HW_STAGE_ES
] = 31;
2918 rctx
->default_gprs
[EG_HW_STAGE_HS
] = 23;
2919 rctx
->default_gprs
[EG_HW_STAGE_LS
] = 23;
2922 switch (ctx_family
) {
2930 tmp
|= S_008C00_VC_ENABLE(1);
2933 tmp
|= S_008C00_EXPORT_SRC_C(1);
2934 tmp
|= S_008C00_CS_PRIO(cs_prio
);
2935 tmp
|= S_008C00_LS_PRIO(ls_prio
);
2936 tmp
|= S_008C00_HS_PRIO(hs_prio
);
2937 tmp
|= S_008C00_PS_PRIO(ps_prio
);
2938 tmp
|= S_008C00_VS_PRIO(vs_prio
);
2939 tmp
|= S_008C00_GS_PRIO(gs_prio
);
2940 tmp
|= S_008C00_ES_PRIO(es_prio
);
2942 r600_store_config_reg_seq(cb
, R_008C00_SQ_CONFIG
, 1);
2943 r600_store_value(cb
, tmp
); /* R_008C00_SQ_CONFIG */
2945 r600_store_config_reg_seq(cb
, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1
, 2);
2946 r600_store_value(cb
, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
2947 r600_store_value(cb
, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */
2949 /* The cs checker requires this register to be set. */
2950 r600_store_context_reg(cb
, R_028800_DB_DEPTH_CONTROL
, 0);
2952 r600_store_context_reg_seq(cb
, R_028350_SX_MISC
, 2);
2953 r600_store_value(cb
, 0);
2954 r600_store_value(cb
, S_028354_SURFACE_SYNC_MASK(0xf));
2959 void evergreen_init_atom_start_cs(struct r600_context
*rctx
)
2961 struct r600_command_buffer
*cb
= &rctx
->start_cs_cmd
;
2969 int num_ps_stack_entries
;
2970 int num_vs_stack_entries
;
2971 int num_gs_stack_entries
;
2972 int num_es_stack_entries
;
2973 int num_hs_stack_entries
;
2974 int num_ls_stack_entries
;
2975 enum radeon_family family
;
2978 if (rctx
->b
.chip_class
== CAYMAN
) {
2979 cayman_init_atom_start_cs(rctx
);
2983 r600_init_command_buffer(cb
, 338);
2985 /* This must be first. */
2986 r600_store_value(cb
, PKT3(PKT3_CONTEXT_CONTROL
, 1, 0));
2987 r600_store_value(cb
, 0x80000000);
2988 r600_store_value(cb
, 0x80000000);
2990 /* We're setting config registers here. */
2991 r600_store_value(cb
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2992 r600_store_value(cb
, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
2994 /* This enables pipeline stat & streamout queries.
2995 * They are only disabled by blits.
2997 r600_store_value(cb
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
2998 r600_store_value(cb
, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START
) | EVENT_INDEX(0));
3000 evergreen_init_common_regs(rctx
, cb
, rctx
->b
.chip_class
,
3001 rctx
->b
.family
, rctx
->screen
->b
.info
.drm_minor
);
3003 family
= rctx
->b
.family
;
3007 num_ps_threads
= 96;
3008 num_vs_threads
= 16;
3009 num_gs_threads
= 16;
3010 num_es_threads
= 16;
3011 num_hs_threads
= 16;
3012 num_ls_threads
= 16;
3013 num_ps_stack_entries
= 42;
3014 num_vs_stack_entries
= 42;
3015 num_gs_stack_entries
= 42;
3016 num_es_stack_entries
= 42;
3017 num_hs_stack_entries
= 42;
3018 num_ls_stack_entries
= 42;
3021 num_ps_threads
= 128;
3022 num_vs_threads
= 20;
3023 num_gs_threads
= 20;
3024 num_es_threads
= 20;
3025 num_hs_threads
= 20;
3026 num_ls_threads
= 20;
3027 num_ps_stack_entries
= 42;
3028 num_vs_stack_entries
= 42;
3029 num_gs_stack_entries
= 42;
3030 num_es_stack_entries
= 42;
3031 num_hs_stack_entries
= 42;
3032 num_ls_stack_entries
= 42;
3035 num_ps_threads
= 128;
3036 num_vs_threads
= 20;
3037 num_gs_threads
= 20;
3038 num_es_threads
= 20;
3039 num_hs_threads
= 20;
3040 num_ls_threads
= 20;
3041 num_ps_stack_entries
= 85;
3042 num_vs_stack_entries
= 85;
3043 num_gs_stack_entries
= 85;
3044 num_es_stack_entries
= 85;
3045 num_hs_stack_entries
= 85;
3046 num_ls_stack_entries
= 85;
3050 num_ps_threads
= 128;
3051 num_vs_threads
= 20;
3052 num_gs_threads
= 20;
3053 num_es_threads
= 20;
3054 num_hs_threads
= 20;
3055 num_ls_threads
= 20;
3056 num_ps_stack_entries
= 85;
3057 num_vs_stack_entries
= 85;
3058 num_gs_stack_entries
= 85;
3059 num_es_stack_entries
= 85;
3060 num_hs_stack_entries
= 85;
3061 num_ls_stack_entries
= 85;
3064 num_ps_threads
= 96;
3065 num_vs_threads
= 16;
3066 num_gs_threads
= 16;
3067 num_es_threads
= 16;
3068 num_hs_threads
= 16;
3069 num_ls_threads
= 16;
3070 num_ps_stack_entries
= 42;
3071 num_vs_stack_entries
= 42;
3072 num_gs_stack_entries
= 42;
3073 num_es_stack_entries
= 42;
3074 num_hs_stack_entries
= 42;
3075 num_ls_stack_entries
= 42;
3078 num_ps_threads
= 96;
3079 num_vs_threads
= 25;
3080 num_gs_threads
= 25;
3081 num_es_threads
= 25;
3082 num_hs_threads
= 16;
3083 num_ls_threads
= 16;
3084 num_ps_stack_entries
= 42;
3085 num_vs_stack_entries
= 42;
3086 num_gs_stack_entries
= 42;
3087 num_es_stack_entries
= 42;
3088 num_hs_stack_entries
= 42;
3089 num_ls_stack_entries
= 42;
3092 num_ps_threads
= 96;
3093 num_vs_threads
= 25;
3094 num_gs_threads
= 25;
3095 num_es_threads
= 25;
3096 num_hs_threads
= 16;
3097 num_ls_threads
= 16;
3098 num_ps_stack_entries
= 85;
3099 num_vs_stack_entries
= 85;
3100 num_gs_stack_entries
= 85;
3101 num_es_stack_entries
= 85;
3102 num_hs_stack_entries
= 85;
3103 num_ls_stack_entries
= 85;
3106 num_ps_threads
= 128;
3107 num_vs_threads
= 20;
3108 num_gs_threads
= 20;
3109 num_es_threads
= 20;
3110 num_hs_threads
= 20;
3111 num_ls_threads
= 20;
3112 num_ps_stack_entries
= 85;
3113 num_vs_stack_entries
= 85;
3114 num_gs_stack_entries
= 85;
3115 num_es_stack_entries
= 85;
3116 num_hs_stack_entries
= 85;
3117 num_ls_stack_entries
= 85;
3120 num_ps_threads
= 128;
3121 num_vs_threads
= 20;
3122 num_gs_threads
= 20;
3123 num_es_threads
= 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	default:
		num_ps_threads = 96;
		num_vs_threads = 10;
		num_gs_threads = 10;
		num_es_threads = 10;
		num_hs_threads = 10;
		num_ls_threads = 10;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	}

	tmp = S_008C18_NUM_PS_THREADS(num_ps_threads);
	tmp |= S_008C18_NUM_VS_THREADS(num_vs_threads);
	tmp |= S_008C18_NUM_GS_THREADS(num_gs_threads);
	tmp |= S_008C18_NUM_ES_THREADS(num_es_threads);

	r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
	r600_store_value(cb, tmp); /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1 */

	tmp = S_008C1C_NUM_HS_THREADS(num_hs_threads);
	tmp |= S_008C1C_NUM_LS_THREADS(num_ls_threads);
	r600_store_value(cb, tmp); /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2 */

	tmp = S_008C20_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
	tmp |= S_008C20_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
	r600_store_value(cb, tmp); /* R_008C20_SQ_STACK_RESOURCE_MGMT_1 */

	tmp = S_008C24_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
	tmp |= S_008C24_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
	r600_store_value(cb, tmp); /* R_008C24_SQ_STACK_RESOURCE_MGMT_2 */

	tmp = S_008C28_NUM_HS_STACK_ENTRIES(num_hs_stack_entries);
	tmp |= S_008C28_NUM_LS_STACK_ENTRIES(num_ls_stack_entries);
	r600_store_value(cb, tmp); /* R_008C28_SQ_STACK_RESOURCE_MGMT_3 */

	r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			      S_008E2C_NUM_PS_LDS(0x1000) | S_008E2C_NUM_LS_LDS(0x1000));

	/* remove LS/HS from one SIMD for hw workaround */
	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xfffffffe);

	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));

	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */

	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, fui(1.0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */

	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);

	r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);

	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);
	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);
	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));

	/* to avoid GPU doing any preloading of constant from random address */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	if (rctx->b.family == CHIP_CAICOS) {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
	} else {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 7);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_value(cb, 0); /* R028B5C_VGT_LS_SIZE */
		r600_store_value(cb, 0); /* R028B60_VGT_HS_SIZE */
		r600_store_value(cb, 0); /* R028B64_VGT_LS_HS_ALLOC */
		r600_store_value(cb, 0); /* R028B68_VGT_HS_PATCH_CONST */
		r600_store_value(cb, 0); /* R028B6C_VGT_TF_PARAM */
	}

	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}
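
/* Build the per-shader command buffer for a pixel shader: SPI input
 * controls, barycentric/interpolator enables, DB_SHADER_CONTROL and the
 * SQ_PGM_* program registers are all derived from the compiled r600_shader.
 */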
void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control = 0;
	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
	int ninterp = 0;
	boolean have_perspective = FALSE, have_linear = FALSE;
	static const unsigned spi_baryc_enable_bit[6] = {
		S_0286E0_PERSP_SAMPLE_ENA(1),
		S_0286E0_PERSP_CENTER_ENA(1),
		S_0286E0_PERSP_CENTROID_ENA(1),
		S_0286E0_LINEAR_SAMPLE_ENA(1),
		S_0286E0_LINEAR_CENTER_ENA(1),
		S_0286E0_LINEAR_CENTROID_ENA(1)
	};
	unsigned spi_baryc_cntl = 0, sid, tmp, num = 0;
	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;
	uint32_t spi_ps_input_cntl[32];

	r600_init_command_buffer(cb, 64);

	for (i = 0; i < rshader->ninput; i++) {
		/* evergreen NUM_INTERP only contains values interpolated into the LDS,
		   POSITION goes via GPRs from the SC so isn't counted */
		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
			pos_index = i;
		else if (rshader->input[i].name == TGSI_SEMANTIC_FACE) {
			if (face_index == -1)
				face_index = i;
		}
		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
			if (face_index == -1)
				face_index = i; /* lives in same register, same enable bit */
		}
		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) {
			fixed_pt_position_index = i;
		}
		else {
			ninterp++;
			int k = eg_get_interpolator_index(
				rshader->input[i].interpolate,
				rshader->input[i].interpolate_location);
			spi_baryc_cntl |= spi_baryc_enable_bit[k];
			have_perspective |= k < 3;
			have_linear |= !(k < 3);
			if (rshader->input[i].uses_interpolate_at_centroid) {
				k = eg_get_interpolator_index(
					rshader->input[i].interpolate,
					TGSI_INTERPOLATE_LOC_CENTROID);
				spi_baryc_cntl |= spi_baryc_enable_bit[k];
			}
		}

		sid = rshader->input[i].spi_sid;

		tmp = S_028644_SEMANTIC(sid);

		/* D3D 9 behaviour. GL is undefined */
		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
			tmp |= S_028644_DEFAULT_VAL(3);

		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
		     rctx->rasterizer && rctx->rasterizer->flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (rshader->input[i].name == TGSI_SEMANTIC_PCOORD ||
		    (rshader->input[i].name == TGSI_SEMANTIC_TEXCOORD &&
		     (sprite_coord_enable & (1 << rshader->input[i].sid)))) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		spi_ps_input_cntl[num++] = tmp;
	}

	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, num);
	r600_store_array(cb, num, spi_ps_input_cntl);

	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			z_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
			stencil_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
		    rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
			mask_export = 1;
	}
	if (rshader->uses_kill)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
	db_shader_control |= S_02880C_STENCIL_EXPORT_ENABLE(stencil_export);
	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);

	if (shader->selector->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
		db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
			S_02880C_EXEC_ON_NOOP(shader->selector->info.writes_memory);
	} else if (shader->selector->info.writes_memory) {
		db_shader_control |= S_02880C_EXEC_ON_HIER_FAIL(1);
	}

	switch (rshader->ps_conservative_z) {
	default: /* fall through */
	case TGSI_FS_DEPTH_LAYOUT_ANY:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_ANY_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_GREATER:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_LESS:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
		break;
	}

	exports_ps = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK)
			exports_ps |= 1;
	}

	num_cout = rshader->ps_export_highest + 1;

	exports_ps |= S_02884C_EXPORT_COLORS(num_cout);

	/* always at least export 1 component per pixel */

	shader->nr_ps_color_outputs = num_cout;
	shader->ps_color_export_mask = rshader->ps_color_export_mask;

	have_perspective = TRUE;

	if (!spi_baryc_cntl)
		spi_baryc_cntl |= spi_baryc_enable_bit[0];

	if (!have_perspective && !have_linear)
		have_perspective = TRUE;

	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(ninterp) |
		S_0286CC_PERSP_GRADIENT_ENA(have_perspective) |
		S_0286CC_LINEAR_GRADIENT_ENA(have_linear);
	spi_input_z = 0;
	if (pos_index != -1) {
		spi_ps_in_control_0 |= S_0286CC_POSITION_ENA(1) |
			S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
			S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr);
		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
	}

	spi_ps_in_control_1 = 0;
	if (face_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
			S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
	}
	if (fixed_pt_position_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
			S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
	}

	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */

	r600_store_context_reg(cb, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);
	r600_store_context_reg(cb, R_02884C_SQ_PGM_EXPORTS_PS, exports_ps);

	r600_store_context_reg_seq(cb, R_028840_SQ_PGM_START_PS, 2);
	r600_store_value(cb, shader->bo->gpu_address >> 8);
	r600_store_value(cb, /* R_028844_SQ_PGM_RESOURCES_PS */
			 S_028844_NUM_GPRS(rshader->bc.ngpr) |
			 S_028844_PRIME_CACHE_ON_DRAW(1) |
			 S_028844_DX10_CLAMP(1) |
			 S_028844_STACK_SIZE(rshader->bc.nstack));
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->db_shader_control = db_shader_control;
	shader->ps_depth_export = z_export | stencil_export | mask_export;

	shader->sprite_coord_enable = sprite_coord_enable;
	if (rctx->rasterizer)
		shader->flatshade = rctx->rasterizer->flatshade;
}

void evergreen_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
			       S_028890_DX10_CLAMP(1) |
			       S_028890_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_02888C_SQ_PGM_START_ES,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void evergreen_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
	unsigned gsvs_itemsizes[4] = {
		(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[1] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[2] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[3] * shader->selector->gs_max_out_vertices) >> 2
	};

	r600_init_command_buffer(cb, 64);

	/* VGT_GS_MODE is written by evergreen_emit_shader_stages */

	r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
			       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));

	if (rctx->screen->b.info.drm_minor >= 35) {
		r600_store_context_reg(cb, R_028B90_VGT_GS_INSTANCE_CNT,
				       S_028B90_CNT(MIN2(shader->selector->gs_num_invocations, 127)) |
				       S_028B90_ENABLE(shader->selector->gs_num_invocations > 0));
	}
	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, cp_shader->ring_item_sizes[0] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[1] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[2] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[3] >> 2);

	r600_store_context_reg(cb, R_028900_SQ_ESGS_RING_ITEMSIZE,
			       (rshader->ring_item_sizes[0]) >> 2);

	r600_store_context_reg(cb, R_028904_SQ_GSVS_RING_ITEMSIZE,
			       gsvs_itemsizes[0] +
			       gsvs_itemsizes[1] +
			       gsvs_itemsizes[2] +
			       gsvs_itemsizes[3]);

	r600_store_context_reg_seq(cb, R_02892C_SQ_GSVS_RING_OFFSET_1, 3);
	r600_store_value(cb, gsvs_itemsizes[0]);
	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1]);
	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1] + gsvs_itemsizes[2]);

	/* FIXME calculate these values somehow ??? */
	r600_store_context_reg_seq(cb, R_028A54_GS_PER_ES, 3);
	r600_store_value(cb, 0x80); /* GS_PER_ES */
	r600_store_value(cb, 0x100); /* ES_PER_GS */
	r600_store_value(cb, 0x2); /* GS_PER_VS */

	r600_store_context_reg(cb, R_028878_SQ_PGM_RESOURCES_GS,
			       S_028878_NUM_GPRS(rshader->bc.ngpr) |
			       S_028878_DX10_CLAMP(1) |
			       S_028878_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_028874_SQ_PGM_START_GS,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned spi_vs_out_id[10] = {};
	unsigned i, tmp, nparams = 0;

	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].spi_sid) {
			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
			spi_vs_out_id[nparams / 4] |= tmp;
			nparams++;
		}
	}

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg_seq(cb, R_02861C_SPI_VS_OUT_ID_0, 10);
	for (i = 0; i < 10; i++) {
		r600_store_value(cb, spi_vs_out_id[i]);
	}

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	if (nparams < 1)
		nparams = 1;

	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
	r600_store_context_reg(cb, R_028860_SQ_PGM_RESOURCES_VS,
			       S_028860_NUM_GPRS(rshader->bc.ngpr) |
			       S_028860_DX10_CLAMP(1) |
			       S_028860_STACK_SIZE(rshader->bc.nstack));
	if (rshader->vs_position_window_space) {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	} else {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_W0_FMT(1) |
				       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
				       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
				       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	}

	r600_store_context_reg(cb, R_02885C_SQ_PGM_START_VS,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->pa_cl_vs_out_cntl =
		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->cc_dist_mask & 0x0F) != 0) |
		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->cc_dist_mask & 0xF0) != 0) |
		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport) |
		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer);
}

void evergreen_update_hs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);
	r600_store_context_reg(cb, R_0288BC_SQ_PGM_RESOURCES_HS,
			       S_0288BC_NUM_GPRS(rshader->bc.ngpr) |
			       S_0288BC_DX10_CLAMP(1) |
			       S_0288BC_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_0288B8_SQ_PGM_START_HS,
			       shader->bo->gpu_address >> 8);
}

void evergreen_update_ls_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);
	r600_store_context_reg(cb, R_0288D4_SQ_PGM_RESOURCES_LS,
			       S_0288D4_NUM_GPRS(rshader->bc.ngpr) |
			       S_0288D4_DX10_CLAMP(1) |
			       S_0288D4_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_0288D0_SQ_PGM_START_LS,
			       shader->bo->gpu_address >> 8);
}
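
/* The helpers below create the one-off CSOs (single-RT blend states and a
 * DSA state) used internally for MSAA resolve, FMASK/CMASK decompression,
 * fast-clear elimination and DB flushes.
 */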

void *evergreen_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, V_028808_CB_RESOLVE);
}

void *evergreen_create_decompress_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned mode = rctx->screen->has_compressed_msaa_texturing ?
			V_028808_CB_FMASK_DECOMPRESS : V_028808_CB_DECOMPRESS;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
}

void *evergreen_create_fastclear_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned mode = V_028808_CB_ELIMINATE_FAST_CLEAR;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
}

void *evergreen_create_db_flush_dsa(struct r600_context *rctx)
{
	struct pipe_depth_stencil_alpha_state dsa = {{0}};

	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
}
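
/* Recompute DB_SHADER_CONTROL from the currently bound pixel shader and
 * framebuffer state; the db_misc_state atom is only marked dirty when the
 * value actually changes.
 */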
void evergreen_update_db_shader_control(struct r600_context * rctx)
{
	bool dual_export;
	unsigned db_shader_control;

	if (!rctx->ps_shader) {
		return;
	}

	dual_export = rctx->framebuffer.export_16bpc &&
		      !rctx->ps_shader->current->ps_depth_export;

	db_shader_control = rctx->ps_shader->current->db_shader_control |
			    S_02880C_DUAL_EXPORT_ENABLE(dual_export) |
			    S_02880C_DB_SOURCE_FORMAT(dual_export ? V_02880C_EXPORT_DB_TWO :
						      V_02880C_EXPORT_DB_FULL) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->framebuffer.cb0_is_integer);

	/* When alpha test is enabled we can't trust the hw to make the proper
	 * decision on the order in which ztest should be run related to fragment
	 * shader execution.
	 *
	 * If alpha test is enabled perform early z rejection (RE_Z) but don't early
	 * write to the zbuffer. Write to zbuffer is delayed after fragment shader
	 * execution and thus after alpha test so if discarded by the alpha test
	 * the z value is not written.
	 * If ReZ is enabled, and the zfunc/zenable/zwrite values change you can
	 * get a hang unless you flush the DB in between. For now just use
	 * LATE_Z.
	 */
	if (rctx->alphatest_state.sx_alpha_test_control || rctx->ps_shader->info.writes_memory) {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
	} else {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}

	if (db_shader_control != rctx->db_misc_state.db_shader_control) {
		rctx->db_misc_state.db_shader_control = db_shader_control;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}
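
/* Tiled<->linear copies on the async DMA ring.  A single DMA packet copies
 * at most EG_DMA_COPY_MAX_SIZE dwords, so larger copies are split into
 * several packets of cheight lines each.
 */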
3764 static void evergreen_dma_copy_tile(struct r600_context
*rctx
,
3765 struct pipe_resource
*dst
,
3770 struct pipe_resource
*src
,
3775 unsigned copy_height
,
3779 struct radeon_cmdbuf
*cs
= rctx
->b
.dma
.cs
;
3780 struct r600_texture
*rsrc
= (struct r600_texture
*)src
;
3781 struct r600_texture
*rdst
= (struct r600_texture
*)dst
;
3782 unsigned array_mode
, lbpp
, pitch_tile_max
, slice_tile_max
, size
;
3783 unsigned ncopy
, height
, cheight
, detile
, i
, x
, y
, z
, src_mode
, dst_mode
;
3784 unsigned sub_cmd
, bank_h
, bank_w
, mt_aspect
, nbanks
, tile_split
, non_disp_tiling
= 0;
3785 uint64_t base
, addr
;
3787 dst_mode
= rdst
->surface
.u
.legacy
.level
[dst_level
].mode
;
3788 src_mode
= rsrc
->surface
.u
.legacy
.level
[src_level
].mode
;
3789 assert(dst_mode
!= src_mode
);
3791 /* non_disp_tiling bit needs to be set for depth, stencil, and fmask surfaces */
3792 if (util_format_has_depth(util_format_description(src
->format
)))
3793 non_disp_tiling
= 1;
3796 sub_cmd
= EG_DMA_COPY_TILED
;
3797 lbpp
= util_logbase2(bpp
);
3798 pitch_tile_max
= ((pitch
/ bpp
) / 8) - 1;
3799 nbanks
= eg_num_banks(rctx
->screen
->b
.info
.r600_num_banks
);
3801 if (dst_mode
== RADEON_SURF_MODE_LINEAR_ALIGNED
) {
3803 array_mode
= evergreen_array_mode(src_mode
);
3804 slice_tile_max
= (rsrc
->surface
.u
.legacy
.level
[src_level
].nblk_x
* rsrc
->surface
.u
.legacy
.level
[src_level
].nblk_y
) / (8*8);
3805 slice_tile_max
= slice_tile_max
? slice_tile_max
- 1 : 0;
3806 /* linear height must be the same as the slice tile max height, it's ok even
3807 * if the linear destination/source have smaller heigh as the size of the
3808 * dma packet will be using the copy_height which is always smaller or equal
3809 * to the linear height
3811 height
= u_minify(rsrc
->resource
.b
.b
.height0
, src_level
);
3816 base
= rsrc
->surface
.u
.legacy
.level
[src_level
].offset
;
3817 addr
= rdst
->surface
.u
.legacy
.level
[dst_level
].offset
;
3818 addr
+= (uint64_t)rdst
->surface
.u
.legacy
.level
[dst_level
].slice_size_dw
* 4 * dst_z
;
3819 addr
+= dst_y
* pitch
+ dst_x
* bpp
;
3820 bank_h
= eg_bank_wh(rsrc
->surface
.u
.legacy
.bankh
);
3821 bank_w
= eg_bank_wh(rsrc
->surface
.u
.legacy
.bankw
);
3822 mt_aspect
= eg_macro_tile_aspect(rsrc
->surface
.u
.legacy
.mtilea
);
3823 tile_split
= eg_tile_split(rsrc
->surface
.u
.legacy
.tile_split
);
3824 base
+= rsrc
->resource
.gpu_address
;
3825 addr
+= rdst
->resource
.gpu_address
;
3828 array_mode
= evergreen_array_mode(dst_mode
);
3829 slice_tile_max
= (rdst
->surface
.u
.legacy
.level
[dst_level
].nblk_x
* rdst
->surface
.u
.legacy
.level
[dst_level
].nblk_y
) / (8*8);
3830 slice_tile_max
= slice_tile_max
? slice_tile_max
- 1 : 0;
3831 /* linear height must be the same as the slice tile max height, it's ok even
3832 * if the linear destination/source have smaller heigh as the size of the
3833 * dma packet will be using the copy_height which is always smaller or equal
3834 * to the linear height
3836 height
= u_minify(rdst
->resource
.b
.b
.height0
, dst_level
);
3841 base
= rdst
->surface
.u
.legacy
.level
[dst_level
].offset
;
3842 addr
= rsrc
->surface
.u
.legacy
.level
[src_level
].offset
;
3843 addr
+= (uint64_t)rsrc
->surface
.u
.legacy
.level
[src_level
].slice_size_dw
* 4 * src_z
;
3844 addr
+= src_y
* pitch
+ src_x
* bpp
;
3845 bank_h
= eg_bank_wh(rdst
->surface
.u
.legacy
.bankh
);
3846 bank_w
= eg_bank_wh(rdst
->surface
.u
.legacy
.bankw
);
3847 mt_aspect
= eg_macro_tile_aspect(rdst
->surface
.u
.legacy
.mtilea
);
3848 tile_split
= eg_tile_split(rdst
->surface
.u
.legacy
.tile_split
);
3849 base
+= rdst
->resource
.gpu_address
;
3850 addr
+= rsrc
->resource
.gpu_address
;
3853 size
= (copy_height
* pitch
) / 4;
3854 ncopy
= (size
/ EG_DMA_COPY_MAX_SIZE
) + !!(size
% EG_DMA_COPY_MAX_SIZE
);
3855 r600_need_dma_space(&rctx
->b
, ncopy
* 9, &rdst
->resource
, &rsrc
->resource
);
3857 for (i
= 0; i
< ncopy
; i
++) {
3858 cheight
= copy_height
;
3859 if (((cheight
* pitch
) / 4) > EG_DMA_COPY_MAX_SIZE
) {
3860 cheight
= (EG_DMA_COPY_MAX_SIZE
* 4) / pitch
;
3862 size
= (cheight
* pitch
) / 4;
3863 /* emit reloc before writing cs so that cs is always in consistent state */
3864 radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.dma
, &rsrc
->resource
,
3865 RADEON_USAGE_READ
, 0);
3866 radeon_add_to_buffer_list(&rctx
->b
, &rctx
->b
.dma
, &rdst
->resource
,
3867 RADEON_USAGE_WRITE
, 0);
3868 radeon_emit(cs
, DMA_PACKET(DMA_PACKET_COPY
, sub_cmd
, size
));
3869 radeon_emit(cs
, base
>> 8);
3870 radeon_emit(cs
, (detile
<< 31) | (array_mode
<< 27) |
3871 (lbpp
<< 24) | (bank_h
<< 21) |
3872 (bank_w
<< 18) | (mt_aspect
<< 16));
3873 radeon_emit(cs
, (pitch_tile_max
<< 0) | ((height
- 1) << 16));
3874 radeon_emit(cs
, (slice_tile_max
<< 0));
3875 radeon_emit(cs
, (x
<< 0) | (z
<< 18));
3876 radeon_emit(cs
, (y
<< 0) | (tile_split
<< 21) | (nbanks
<< 25) | (non_disp_tiling
<< 28));
3877 radeon_emit(cs
, addr
& 0xfffffffc);
3878 radeon_emit(cs
, (addr
>> 32UL) & 0xff);
3879 copy_height
-= cheight
;
3880 addr
+= cheight
* pitch
;
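
/* DMA copy callback (rctx->b.dma_copy).  Buffer-to-buffer copies go straight
 * to evergreen_dma_copy_buffer; texture copies use the same-mode or tiled
 * path; anything unsupported falls back to r600_resource_copy_region.
 */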
static void evergreen_dma_copy(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       unsigned dst_level,
			       unsigned dstx, unsigned dsty, unsigned dstz,
			       struct pipe_resource *src,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture *)src;
	struct r600_texture *rdst = (struct r600_texture *)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (rctx->b.dma.cs == NULL) {
		goto fallback;
	}

	if (rctx->cmd_buf_is_compute) {
		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->cmd_buf_is_compute = false;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	if (src_box->depth > 1 ||
	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
	copy_height = src_box->height / rsrc->surface.blk_h;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
		/* FIXME evergreen can do partial blit */
		goto fallback;
	}
	/* the x test here are currently useless (because we don't support partial blit)
	 * but keep them around so we don't forget about those
	 */
	if (src_pitch % 8 || src_box->x % 8 || dst_x % 8 || src_box->y % 8 || dst_y % 8) {
		goto fallback;
	}

	/* 128 bpp surfaces require non_disp_tiling for both
	 * tiled and linear buffers on cayman.  However, async
	 * DMA only supports it on the tiled side.  As such
	 * the tile order is backwards after a L2T/T2L packet.
	 */
	if ((rctx->b.chip_class == CAYMAN) &&
	    (src_mode != dst_mode) &&
	    (util_format_get_blocksize(src->format) >= 16)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* simple dma blit would do NOTE code here assume :
		 *   dst_pitch == src_pitch
		 */
		src_offset = rsrc->surface.u.legacy.level[src_level].offset;
		src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
		dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		evergreen_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset,
					  src_box->height * src_pitch);
	} else {
		evergreen_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
					src, src_level, src_x, src_y, src_box->z,
					copy_height, dst_pitch, bpp);
	}
	return;

fallback:
	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box);
}

static void evergreen_set_tess_state(struct pipe_context *ctx,
				     const float default_outer_level[4],
				     const float default_inner_level[2])
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	memcpy(rctx->tess_state, default_outer_level, sizeof(float) * 4);
	memcpy(rctx->tess_state+4, default_inner_level, sizeof(float) * 2);
	rctx->driver_consts[PIPE_SHADER_TESS_CTRL].tcs_default_levels_dirty = true;
}
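
/* Lazily allocate the per-resource "immediate" buffer used for image/buffer
 * RAT writes and fill the buffer resource words describing it.  Its size is
 * max_se * 256 * 64 texels of the given format.
 */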
static void evergreen_setup_immed_buffer(struct r600_context *rctx,
					 struct r600_image_view *rview,
					 enum pipe_format pformat)
{
	struct r600_screen *rscreen = (struct r600_screen *)rctx->b.b.screen;
	uint32_t immed_size = rscreen->b.info.max_se * 256 * 64 * util_format_get_blocksize(pformat);
	struct eg_buf_res_params buf_params;
	bool skip_reloc = false;
	struct r600_resource *resource = (struct r600_resource *)rview->base.resource;
	if (!resource->immed_buffer) {
		eg_resource_alloc_immed(&rscreen->b, resource, immed_size);
	}

	memset(&buf_params, 0, sizeof(buf_params));
	buf_params.pipe_format = pformat;
	buf_params.size = resource->immed_buffer->b.b.width0;
	buf_params.swizzle[0] = PIPE_SWIZZLE_X;
	buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
	buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
	buf_params.swizzle[3] = PIPE_SWIZZLE_W;
	buf_params.uncached = 1;
	evergreen_fill_buffer_resource_words(rctx, &resource->immed_buffer->b.b,
					     &buf_params, &skip_reloc,
					     rview->immed_resource_words);
}
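
/* pipe_context::set_hw_atomic_buffers: only record the buffer bindings in
 * rctx->atomic_buffer_state here; they are consumed later when the related
 * atoms are emitted.
 */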
static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx,
					    unsigned start_slot,
					    unsigned count,
					    const struct pipe_shader_buffer *buffers)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_atomic_buffer_state *astate;
	unsigned i, idx;

	astate = &rctx->atomic_buffer_state;

	/* we'd probably like to expand this to 8 later so put the logic in */
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		const struct pipe_shader_buffer *buf;
		struct pipe_shader_buffer *abuf;

		abuf = &astate->buffer[i];

		if (!buffers || !buffers[idx].buffer) {
			pipe_resource_reference(&abuf->buffer, NULL);
			continue;
		}

		buf = &buffers[idx];

		pipe_resource_reference(&abuf->buffer, buf->buffer);
		abuf->buffer_offset = buf->buffer_offset;
		abuf->buffer_size = buf->buffer_size;
	}
}

static void evergreen_set_shader_buffers(struct pipe_context *ctx,
					 enum pipe_shader_type shader, unsigned start_slot,
					 unsigned count,
					 const struct pipe_shader_buffer *buffers,
					 unsigned writable_bitmask)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_image_state *istate = NULL;
	struct r600_image_view *rview;
	struct r600_tex_color_info color;
	struct eg_buf_res_params buf_params;
	struct r600_resource *resource;
	unsigned i, idx, old_mask, res_type;

	if (shader != PIPE_SHADER_FRAGMENT &&
	    shader != PIPE_SHADER_COMPUTE && count == 0)
		return;

	if (shader == PIPE_SHADER_FRAGMENT)
		istate = &rctx->fragment_buffers;
	else if (shader == PIPE_SHADER_COMPUTE)
		istate = &rctx->compute_buffers;

	old_mask = istate->enabled_mask;
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		const struct pipe_shader_buffer *buf;

		rview = &istate->views[i];

		if (!buffers || !buffers[idx].buffer) {
			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
			istate->enabled_mask &= ~(1 << i);
			continue;
		}

		buf = &buffers[idx];
		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, buf->buffer);

		resource = (struct r600_resource *)rview->base.resource;

		evergreen_setup_immed_buffer(rctx, rview, PIPE_FORMAT_R32_UINT);

		evergreen_set_color_surface_buffer(rctx, resource,
						   PIPE_FORMAT_R32_UINT,
						   buf->buffer_offset,
						   buf->buffer_offset + buf->buffer_size,
						   &color);
		res_type = V_028C70_BUFFER;

		rview->cb_color_base = color.offset;
		rview->cb_color_dim = color.dim;
		rview->cb_color_info = color.info |
			S_028C70_RAT(1) |
			S_028C70_RESOURCE_TYPE(res_type);
		rview->cb_color_pitch = color.pitch;
		rview->cb_color_slice = color.slice;
		rview->cb_color_view = color.view;
		rview->cb_color_attrib = color.attrib;
		rview->cb_color_fmask = color.fmask;
		rview->cb_color_fmask_slice = color.fmask_slice;

		memset(&buf_params, 0, sizeof(buf_params));
		buf_params.pipe_format = PIPE_FORMAT_R32_UINT;
		buf_params.offset = buf->buffer_offset;
		buf_params.size = buf->buffer_size;
		buf_params.swizzle[0] = PIPE_SWIZZLE_X;
		buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
		buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
		buf_params.swizzle[3] = PIPE_SWIZZLE_W;
		buf_params.force_swizzle = true;
		buf_params.uncached = 1;
		buf_params.size_in_bytes = true;
		evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
						     &buf_params,
						     &rview->skip_mip_address_reloc,
						     rview->resource_words);

		istate->enabled_mask |= (1 << i);
	}

	istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;

	if (old_mask != istate->enabled_mask)
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	/* construct the target mask */
	if (rctx->cb_misc_state.buffer_rat_enabled_mask != istate->enabled_mask) {
		rctx->cb_misc_state.buffer_rat_enabled_mask = istate->enabled_mask;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (shader == PIPE_SHADER_FRAGMENT)
		r600_mark_atom_dirty(rctx, &istate->atom);
}
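
/* pipe_context::set_shader_images for fragment and compute shaders: each
 * bound view is described both as a colour-buffer style RAT (cb_color_*)
 * and as a texture/buffer resource, mirroring the buffer path above.
 */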
4151 static void evergreen_set_shader_images(struct pipe_context
*ctx
,
4152 enum pipe_shader_type shader
, unsigned start_slot
,
4154 const struct pipe_image_view
*images
)
4156 struct r600_context
*rctx
= (struct r600_context
*)ctx
;
4158 struct r600_image_view
*rview
;
4159 struct pipe_resource
*image
;
4160 struct r600_resource
*resource
;
4161 struct r600_tex_color_info color
;
4162 struct eg_buf_res_params buf_params
;
4163 struct eg_tex_res_params tex_params
;
4165 struct r600_image_state
*istate
= NULL
;
4167 if (shader
!= PIPE_SHADER_FRAGMENT
&& shader
!= PIPE_SHADER_COMPUTE
&& count
== 0)
4170 if (shader
== PIPE_SHADER_FRAGMENT
)
4171 istate
= &rctx
->fragment_images
;
4172 else if (shader
== PIPE_SHADER_COMPUTE
)
4173 istate
= &rctx
->compute_images
;
4175 assert (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
);
4177 old_mask
= istate
->enabled_mask
;
4178 for (i
= start_slot
, idx
= 0; i
< start_slot
+ count
; i
++, idx
++) {
4180 const struct pipe_image_view
*iview
;
4181 rview
= &istate
->views
[i
];
4183 if (!images
|| !images
[idx
].resource
) {
4184 pipe_resource_reference((struct pipe_resource
**)&rview
->base
.resource
, NULL
);
4185 istate
->enabled_mask
&= ~(1 << i
);
4186 istate
->compressed_colortex_mask
&= ~(1 << i
);
4187 istate
->compressed_depthtex_mask
&= ~(1 << i
);
4191 iview
= &images
[idx
];
4192 image
= iview
->resource
;
4193 resource
= (struct r600_resource
*)image
;
4195 r600_context_add_resource_size(ctx
, image
);
4197 rview
->base
= *iview
;
4198 rview
->base
.resource
= NULL
;
4199 pipe_resource_reference((struct pipe_resource
**)&rview
->base
.resource
, image
);
4201 evergreen_setup_immed_buffer(rctx
, rview
, iview
->format
);
4203 bool is_buffer
= image
->target
== PIPE_BUFFER
;
4204 struct r600_texture
*rtex
= (struct r600_texture
*)image
;
4205 if (!is_buffer
& rtex
->db_compatible
)
4206 istate
->compressed_depthtex_mask
|= 1 << i
;
4208 istate
->compressed_depthtex_mask
&= ~(1 << i
);
4210 if (!is_buffer
&& rtex
->cmask
.size
)
4211 istate
->compressed_colortex_mask
|= 1 << i
;
4213 istate
->compressed_colortex_mask
&= ~(1 << i
);
4216 evergreen_set_color_surface_common(rctx
, rtex
,
4218 iview
->u
.tex
.first_layer
,
4219 iview
->u
.tex
.last_layer
,
4222 color
.dim
= S_028C78_WIDTH_MAX(u_minify(image
->width0
, iview
->u
.tex
.level
) - 1) |
4223 S_028C78_HEIGHT_MAX(u_minify(image
->height0
, iview
->u
.tex
.level
) - 1);
4227 evergreen_set_color_surface_buffer(rctx
, resource
,
4229 iview
->u
.buf
.offset
,
4234 switch (image
->target
) {
4236 res_type
= V_028C70_BUFFER
;
4238 case PIPE_TEXTURE_1D
:
4239 res_type
= V_028C70_TEXTURE1D
;
4241 case PIPE_TEXTURE_1D_ARRAY
:
4242 res_type
= V_028C70_TEXTURE1DARRAY
;
4244 case PIPE_TEXTURE_2D
:
4245 case PIPE_TEXTURE_RECT
:
4246 res_type
= V_028C70_TEXTURE2D
;
4248 case PIPE_TEXTURE_3D
:
4249 res_type
= V_028C70_TEXTURE3D
;
4251 case PIPE_TEXTURE_2D_ARRAY
:
4252 case PIPE_TEXTURE_CUBE
:
4253 case PIPE_TEXTURE_CUBE_ARRAY
:
4254 res_type
= V_028C70_TEXTURE2DARRAY
;
4262 rview
->cb_color_base
= color
.offset
;
4263 rview
->cb_color_dim
= color
.dim
;
4264 rview
->cb_color_info
= color
.info
|
4266 S_028C70_RESOURCE_TYPE(res_type
);
4267 rview
->cb_color_pitch
= color
.pitch
;
4268 rview
->cb_color_slice
= color
.slice
;
4269 rview
->cb_color_view
= color
.view
;
4270 rview
->cb_color_attrib
= color
.attrib
;
4271 rview
->cb_color_fmask
= color
.fmask
;
4272 rview
->cb_color_fmask_slice
= color
.fmask_slice
;
4274 if (image
->target
!= PIPE_BUFFER
) {
4275 memset(&tex_params
, 0, sizeof(tex_params
));
4276 tex_params
.pipe_format
= iview
->format
;
4277 tex_params
.force_level
= 0;
4278 tex_params
.width0
= image
->width0
;
4279 tex_params
.height0
= image
->height0
;
4280 tex_params
.first_level
= iview
->u
.tex
.level
;
4281 tex_params
.last_level
= iview
->u
.tex
.level
;
4282 tex_params
.first_layer
= iview
->u
.tex
.first_layer
;
4283 tex_params
.last_layer
= iview
->u
.tex
.last_layer
;
4284 tex_params
.target
= image
->target
;
4285 tex_params
.swizzle
[0] = PIPE_SWIZZLE_X
;
4286 tex_params
.swizzle
[1] = PIPE_SWIZZLE_Y
;
4287 tex_params
.swizzle
[2] = PIPE_SWIZZLE_Z
;
4288 tex_params
.swizzle
[3] = PIPE_SWIZZLE_W
;
4289 evergreen_fill_tex_resource_words(rctx
, &resource
->b
.b
, &tex_params
,
4290 &rview
->skip_mip_address_reloc
,
4291 rview
->resource_words
);
4294 memset(&buf_params
, 0, sizeof(buf_params
));
4295 buf_params
.pipe_format
= iview
->format
;
4296 buf_params
.size
= iview
->u
.buf
.size
;
4297 buf_params
.offset
= iview
->u
.buf
.offset
;
4298 buf_params
.swizzle
[0] = PIPE_SWIZZLE_X
;
4299 buf_params
.swizzle
[1] = PIPE_SWIZZLE_Y
;
4300 buf_params
.swizzle
[2] = PIPE_SWIZZLE_Z
;
4301 buf_params
.swizzle
[3] = PIPE_SWIZZLE_W
;
4302 evergreen_fill_buffer_resource_words(rctx
, &resource
->b
.b
,
4304 &rview
->skip_mip_address_reloc
,
4305 rview
->resource_words
);
4307 istate
->enabled_mask
|= (1 << i
);
4310 istate
->atom
.num_dw
= util_bitcount(istate
->enabled_mask
) * 46;
4311 istate
->dirty_buffer_constants
= TRUE
;
4312 rctx
->b
.flags
|= R600_CONTEXT_WAIT_3D_IDLE
| R600_CONTEXT_FLUSH_AND_INV
;
4313 rctx
->b
.flags
|= R600_CONTEXT_FLUSH_AND_INV_CB
|
4314 R600_CONTEXT_FLUSH_AND_INV_CB_META
;
4316 if (old_mask
!= istate
->enabled_mask
)
4317 r600_mark_atom_dirty(rctx
, &rctx
->framebuffer
.atom
);
4319 if (rctx
->cb_misc_state
.image_rat_enabled_mask
!= istate
->enabled_mask
) {
4320 rctx
->cb_misc_state
.image_rat_enabled_mask
= istate
->enabled_mask
;
4321 r600_mark_atom_dirty(rctx
, &rctx
->cb_misc_state
.atom
);
4324 if (shader
== PIPE_SHADER_FRAGMENT
)
4325 r600_mark_atom_dirty(rctx
, &istate
->atom
);

static void evergreen_get_pipe_constant_buffer(struct r600_context *rctx,
					       enum pipe_shader_type shader, uint slot,
					       struct pipe_constant_buffer *cbuf)
{
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	cbuf->user_buffer = NULL;

	cb = &state->cb[slot];

	cbuf->buffer_size = cb->buffer_size;
	pipe_resource_reference(&cbuf->buffer, cb->buffer);
}
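
/* Reverse of evergreen_set_shader_buffers for the compute save/restore path:
 * reconstruct each binding's offset and size from the resource words that
 * were programmed into the view.
 */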
static void evergreen_get_shader_buffers(struct r600_context *rctx,
					 enum pipe_shader_type shader,
					 uint start_slot, uint count,
					 struct pipe_shader_buffer *sbuf)
{
	assert(shader == PIPE_SHADER_COMPUTE);
	int idx, i;
	struct r600_image_state *istate = &rctx->compute_buffers;
	struct r600_image_view *rview;

	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {

		rview = &istate->views[i];

		pipe_resource_reference(&sbuf[idx].buffer, rview->base.resource);
		if (rview->base.resource) {
			uint64_t rview_va = ((struct r600_resource *)rview->base.resource)->gpu_address;

			uint64_t prog_va = rview->resource_words[0];

			prog_va += ((uint64_t)G_030008_BASE_ADDRESS_HI(rview->resource_words[2])) << 32;
			prog_va -= rview_va;

			sbuf[idx].buffer_offset = prog_va & 0xffffffff;
			sbuf[idx].buffer_size = rview->resource_words[1] + 1;
		} else {
			sbuf[idx].buffer_offset = 0;
			sbuf[idx].buffer_size = 0;
		}
	}
}

static void evergreen_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	st->saved_compute = rctx->cs_shader_state.shader;

	/* save constant buffer 0 */
	evergreen_get_pipe_constant_buffer(rctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);

	evergreen_get_shader_buffers(rctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
}
4386 void evergreen_init_state_functions(struct r600_context
*rctx
)
4391 * To avoid GPU lockup registers must be emitted in a specific order
4392 * (no kidding ...). The order below is important and have been
4393 * partially inferred from analyzing fglrx command stream.
4395 * Don't reorder atom without carefully checking the effect (GPU lockup
4396 * or piglit regression).
4399 if (rctx
->b
.chip_class
== EVERGREEN
) {
4400 r600_init_atom(rctx
, &rctx
->config_state
.atom
, id
++, evergreen_emit_config_state
, 11);
4401 rctx
->config_state
.dyn_gpr_enabled
= true;
4403 r600_init_atom(rctx
, &rctx
->framebuffer
.atom
, id
++, evergreen_emit_framebuffer_state
, 0);
4404 r600_init_atom(rctx
, &rctx
->fragment_images
.atom
, id
++, evergreen_emit_fragment_image_state
, 0);
4405 r600_init_atom(rctx
, &rctx
->compute_images
.atom
, id
++, evergreen_emit_compute_image_state
, 0);
4406 r600_init_atom(rctx
, &rctx
->fragment_buffers
.atom
, id
++, evergreen_emit_fragment_buffer_state
, 0);
4407 r600_init_atom(rctx
, &rctx
->compute_buffers
.atom
, id
++, evergreen_emit_compute_buffer_state
, 0);
4409 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_VERTEX
].atom
, id
++, evergreen_emit_vs_constant_buffers
, 0);
4410 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_GEOMETRY
].atom
, id
++, evergreen_emit_gs_constant_buffers
, 0);
4411 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_FRAGMENT
].atom
, id
++, evergreen_emit_ps_constant_buffers
, 0);
4412 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_TESS_CTRL
].atom
, id
++, evergreen_emit_tcs_constant_buffers
, 0);
4413 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_TESS_EVAL
].atom
, id
++, evergreen_emit_tes_constant_buffers
, 0);
4414 r600_init_atom(rctx
, &rctx
->constbuf_state
[PIPE_SHADER_COMPUTE
].atom
, id
++, evergreen_emit_cs_constant_buffers
, 0);
4415 /* shader program */
4416 r600_init_atom(rctx
, &rctx
->cs_shader_state
.atom
, id
++, evergreen_emit_cs_shader
, 0);
4418 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
].states
.atom
, id
++, evergreen_emit_vs_sampler_states
, 0);
4419 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_GEOMETRY
].states
.atom
, id
++, evergreen_emit_gs_sampler_states
, 0);
4420 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_CTRL
].states
.atom
, id
++, evergreen_emit_tcs_sampler_states
, 0);
4421 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_EVAL
].states
.atom
, id
++, evergreen_emit_tes_sampler_states
, 0);
4422 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_FRAGMENT
].states
.atom
, id
++, evergreen_emit_ps_sampler_states
, 0);
4423 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_COMPUTE
].states
.atom
, id
++, evergreen_emit_cs_sampler_states
, 0);
4425 r600_init_atom(rctx
, &rctx
->vertex_buffer_state
.atom
, id
++, evergreen_fs_emit_vertex_buffers
, 0);
4426 r600_init_atom(rctx
, &rctx
->cs_vertex_buffer_state
.atom
, id
++, evergreen_cs_emit_vertex_buffers
, 0);
4427 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_VERTEX
].views
.atom
, id
++, evergreen_emit_vs_sampler_views
, 0);
4428 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_GEOMETRY
].views
.atom
, id
++, evergreen_emit_gs_sampler_views
, 0);
4429 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_CTRL
].views
.atom
, id
++, evergreen_emit_tcs_sampler_views
, 0);
4430 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_TESS_EVAL
].views
.atom
, id
++, evergreen_emit_tes_sampler_views
, 0);
4431 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_FRAGMENT
].views
.atom
, id
++, evergreen_emit_ps_sampler_views
, 0);
4432 r600_init_atom(rctx
, &rctx
->samplers
[PIPE_SHADER_COMPUTE
].views
.atom
, id
++, evergreen_emit_cs_sampler_views
, 0);
4434 r600_init_atom(rctx
, &rctx
->vgt_state
.atom
, id
++, r600_emit_vgt_state
, 10);
4436 if (rctx
->b
.chip_class
== EVERGREEN
) {
4437 r600_init_atom(rctx
, &rctx
->sample_mask
.atom
, id
++, evergreen_emit_sample_mask
, 3);
4439 r600_init_atom(rctx
, &rctx
->sample_mask
.atom
, id
++, cayman_emit_sample_mask
, 4);
4441 rctx
->sample_mask
.sample_mask
= ~0;
4443 r600_init_atom(rctx
, &rctx
->alphatest_state
.atom
, id
++, r600_emit_alphatest_state
, 6);
4444 r600_init_atom(rctx
, &rctx
->blend_color
.atom
, id
++, r600_emit_blend_color
, 6);
4445 r600_init_atom(rctx
, &rctx
->blend_state
.atom
, id
++, r600_emit_cso_state
, 0);
4446 r600_init_atom(rctx
, &rctx
->cb_misc_state
.atom
, id
++, evergreen_emit_cb_misc_state
, 4);
4447 r600_init_atom(rctx
, &rctx
->clip_misc_state
.atom
, id
++, r600_emit_clip_misc_state
, 9);
4448 r600_init_atom(rctx
, &rctx
->clip_state
.atom
, id
++, evergreen_emit_clip_state
, 26);
4449 r600_init_atom(rctx
, &rctx
->db_misc_state
.atom
, id
++, evergreen_emit_db_misc_state
, 10);
4450 r600_init_atom(rctx
, &rctx
->db_state
.atom
, id
++, evergreen_emit_db_state
, 14);
4451 r600_init_atom(rctx
, &rctx
->dsa_state
.atom
, id
++, r600_emit_cso_state
, 0);
4452 r600_init_atom(rctx
, &rctx
->poly_offset_state
.atom
, id
++, evergreen_emit_polygon_offset
, 9);
4453 r600_init_atom(rctx
, &rctx
->rasterizer_state
.atom
, id
++, r600_emit_cso_state
, 0);
4454 r600_add_atom(rctx
, &rctx
->b
.scissors
.atom
, id
++);
4455 r600_add_atom(rctx
, &rctx
->b
.viewports
.atom
, id
++);
4456 r600_init_atom(rctx
, &rctx
->stencil_ref
.atom
, id
++, r600_emit_stencil_ref
, 4);
4457 r600_init_atom(rctx
, &rctx
->vertex_fetch_shader
.atom
, id
++, evergreen_emit_vertex_fetch_shader
, 5);
4458 r600_add_atom(rctx
, &rctx
->b
.render_cond_atom
, id
++);
4459 r600_add_atom(rctx
, &rctx
->b
.streamout
.begin_atom
, id
++);
4460 r600_add_atom(rctx
, &rctx
->b
.streamout
.enable_atom
, id
++);
4461 for (i
= 0; i
< EG_NUM_HW_STAGES
; i
++)
4462 r600_init_atom(rctx
, &rctx
->hw_shader_stages
[i
].atom
, id
++, r600_emit_shader
, 0);
4463 r600_init_atom(rctx
, &rctx
->shader_stages
.atom
, id
++, evergreen_emit_shader_stages
, 15);
4464 r600_init_atom(rctx
, &rctx
->gs_rings
.atom
, id
++, evergreen_emit_gs_rings
, 26);
4466 rctx
->b
.b
.create_blend_state
= evergreen_create_blend_state
;
4467 rctx
->b
.b
.create_depth_stencil_alpha_state
= evergreen_create_dsa_state
;
4468 rctx
->b
.b
.create_rasterizer_state
= evergreen_create_rs_state
;
4469 rctx
->b
.b
.create_sampler_state
= evergreen_create_sampler_state
;
4470 rctx
->b
.b
.create_sampler_view
= evergreen_create_sampler_view
;
4471 rctx
->b
.b
.set_framebuffer_state
= evergreen_set_framebuffer_state
;
4472 rctx
->b
.b
.set_polygon_stipple
= evergreen_set_polygon_stipple
;
4473 rctx
->b
.b
.set_min_samples
= evergreen_set_min_samples
;
4474 rctx
->b
.b
.set_tess_state
= evergreen_set_tess_state
;
4475 rctx
->b
.b
.set_hw_atomic_buffers
= evergreen_set_hw_atomic_buffers
;
4476 rctx
->b
.b
.set_shader_images
= evergreen_set_shader_images
;
4477 rctx
->b
.b
.set_shader_buffers
= evergreen_set_shader_buffers
;
4478 if (rctx
->b
.chip_class
== EVERGREEN
)
4479 rctx
->b
.b
.get_sample_position
= evergreen_get_sample_position
;
4481 rctx
->b
.b
.get_sample_position
= cayman_get_sample_position
;
4482 rctx
->b
.dma_copy
= evergreen_dma_copy
;
4483 rctx
->b
.save_qbo_state
= evergreen_save_qbo_state
;
4485 evergreen_init_compute_state_functions(rctx
);

/*
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to the const buffer.
 *
 * const buffer contains -
 * uint32_t input_patch_size
 * uint32_t input_vertex_size
 * uint32_t num_tcs_input_cp
 * uint32_t num_tcs_output_cp;
 * uint32_t output_patch_size
 * uint32_t output_vertex_size
 * uint32_t output_patch0_offset
 * uint32_t perpatch_output_offset
 * and the same constbuf is bound to LS/HS/VS(ES).
 */
void evergreen_setup_tess_constants(struct r600_context *rctx, const struct pipe_draw_info *info, unsigned *num_patches)
{
	struct pipe_constant_buffer constbuf = {0};
	struct r600_pipe_shader_selector *tcs = rctx->tcs_shader ? rctx->tcs_shader : rctx->tes_shader;
	struct r600_pipe_shader_selector *ls = rctx->vs_shader;
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_outputs;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_patch_outputs;
	unsigned num_tcs_inputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, pervertex_output_patch_size, output_patch_size;
	unsigned output_patch0_offset, perpatch_output_offset, lds_size;
	uint32_t values[8];
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
	unsigned wave_divisor = (16 * num_pipes);

	if (!rctx->tes_shader) {
		rctx->lds_alloc = 0;
		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					      R600_LDS_INFO_CONST_BUFFER, NULL);
		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
					      R600_LDS_INFO_CONST_BUFFER, NULL);
		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
					      R600_LDS_INFO_CONST_BUFFER, NULL);
		return;
	}

	if (rctx->lds_alloc != 0 &&
	    rctx->last_ls == ls &&
	    rctx->last_num_tcs_input_cp == num_tcs_input_cp &&
	    rctx->last_tcs == tcs)
		return;

	num_tcs_inputs = util_last_bit64(ls->lds_outputs_written_mask);

	if (rctx->tcs_shader) {
		num_tcs_outputs = util_last_bit64(tcs->lds_outputs_written_mask);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->lds_patch_outputs_written_mask);
	} else {
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	output_patch0_offset = rctx->tcs_shader ? input_patch_size * *num_patches : 0;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;

	values[0] = input_patch_size;
	values[1] = input_vertex_size;
	values[2] = num_tcs_input_cp;
	values[3] = num_tcs_output_cp;

	values[4] = output_patch_size;
	values[5] = output_vertex_size;
	values[6] = output_patch0_offset;
	values[7] = perpatch_output_offset;

	/* docs say HS_NUM_WAVES - CEIL((LS_HS_CONFIG.NUM_PATCHES *
	   LS_HS_CONFIG.HS_NUM_OUTPUT_CP) / (NUM_GOOD_PIPES * 16)) */
	num_waves = ceilf((float)(*num_patches * num_tcs_output_cp) / (float)wave_divisor);

	rctx->lds_alloc = (lds_size | (num_waves << 14));

	rctx->last_ls = ls;
	rctx->last_tcs = tcs;
	rctx->last_num_tcs_input_cp = num_tcs_input_cp;

	constbuf.user_buffer = values;
	constbuf.buffer_size = 8 * 4;

	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
	pipe_resource_reference(&constbuf.buffer, NULL);
}
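
/* Pack VGT_LS_HS_CONFIG from the draw info: the number of patches plus the
 * input and output control-point counts of the hull shader stage.
 */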
uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx,
				    const struct pipe_draw_info *info,
				    unsigned num_patches)
{
	unsigned num_output_cp;

	if (!rctx->tes_shader)
		return 0;

	num_output_cp = rctx->tcs_shader ?
		rctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
		info->vertices_per_patch;

	return S_028B58_NUM_PATCHES(num_patches) |
	       S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
	       S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}

void evergreen_set_ls_hs_config(struct r600_context *rctx,
				struct radeon_cmdbuf *cs,
				uint32_t ls_hs_config)
{
	radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
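
/* rctx->lds_alloc, computed in evergreen_setup_tess_constants(), already
 * encodes the LDS size in the low bits and the HS wave count shifted left
 * by 14, so it can be written to SQ_LDS_ALLOC as-is.
 */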
void evergreen_set_lds_alloc(struct r600_context *rctx,
			     struct radeon_cmdbuf *cs,
			     uint32_t lds_alloc)
{
	radeon_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, lds_alloc);
}
/* On Evergreen, if you are running tessellation you need to disable dynamic
   GPRs to work around a hardware bug. */
bool evergreen_adjust_gprs(struct r600_context *rctx)
{
	unsigned num_gprs[EG_NUM_HW_STAGES];
	unsigned def_gprs[EG_NUM_HW_STAGES];
	unsigned cur_gprs[EG_NUM_HW_STAGES];
	unsigned new_gprs[EG_NUM_HW_STAGES];
	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
	unsigned max_gprs = 0;
	unsigned tmp[3];
	unsigned total_gprs;
	unsigned i;
	bool rework = false, set_default = false, set_dirty = false;

	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		def_gprs[i] = rctx->default_gprs[i];
		max_gprs += def_gprs[i];
	}
	max_gprs += def_num_clause_temp_gprs * 2;
	/* if we have no TESS and dyn gpr is enabled then do nothing. */
	if (!rctx->hw_shader_stages[EG_HW_STAGE_HS].shader) {
		if (rctx->config_state.dyn_gpr_enabled)
			return true;

		/* transition back to dyn gpr enabled state */
		rctx->config_state.dyn_gpr_enabled = true;
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		return true;
	}
	/* gather required shader gprs */
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		if (rctx->hw_shader_stages[i].shader)
			num_gprs[i] = rctx->hw_shader_stages[i].shader->shader.bc.ngpr;
		else
			num_gprs[i] = 0;
	}
	cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	cur_gprs[EG_HW_STAGE_LS] = G_008C0C_NUM_LS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
	cur_gprs[EG_HW_STAGE_HS] = G_008C0C_NUM_HS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
	total_gprs = 0;
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		new_gprs[i] = num_gprs[i];
		total_gprs += num_gprs[i];
	}

	if (total_gprs > (max_gprs - (2 * def_num_clause_temp_gprs)))
		return false;
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		if (new_gprs[i] > cur_gprs[i]) {
			rework = true;
			break;
		}
	}

	if (rctx->config_state.dyn_gpr_enabled) {
		set_dirty = true;
		rctx->config_state.dyn_gpr_enabled = false;
	}

	if (!rework) {
		set_default = true;
		for (i = 0; i < EG_NUM_HW_STAGES; i++) {
			if (new_gprs[i] > def_gprs[i])
				set_default = false;
		}
	}

	if (set_default) {
		for (i = 0; i < EG_NUM_HW_STAGES; i++) {
			new_gprs[i] = def_gprs[i];
		}
	} else {
		unsigned ps_value = max_gprs;

		ps_value -= (def_num_clause_temp_gprs * 2);
		for (i = R600_HW_STAGE_VS; i < EG_NUM_HW_STAGES; i++)
			ps_value -= new_gprs[i];

		new_gprs[R600_HW_STAGE_PS] = ps_value;
	}
	tmp[0] = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
		 S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
		 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);

	tmp[1] = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
		 S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);

	tmp[2] = S_008C0C_NUM_HS_GPRS(new_gprs[EG_HW_STAGE_HS]) |
		 S_008C0C_NUM_LS_GPRS(new_gprs[EG_HW_STAGE_LS]);
	if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp[0] ||
	    rctx->config_state.sq_gpr_resource_mgmt_2 != tmp[1] ||
	    rctx->config_state.sq_gpr_resource_mgmt_3 != tmp[2]) {
		rctx->config_state.sq_gpr_resource_mgmt_1 = tmp[0];
		rctx->config_state.sq_gpr_resource_mgmt_2 = tmp[1];
		rctx->config_state.sq_gpr_resource_mgmt_3 = tmp[2];
		set_dirty = true;
	}

	if (set_dirty) {
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
	}
	return true;
}
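/* Emit a trace point: store the current trace id into the trace buffer with
 * MEM_WRITE and tag the command stream with an encoded NOP marker. */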
#define AC_ENCODE_TRACE_POINT(id) (0xcafe0000 | ((id) & 0xffff))

void eg_trace_emit(struct r600_context *rctx)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	unsigned reloc;

	if (rctx->b.chip_class < EVERGREEN)
		return;

	/* This must be done after r600_need_cs_space. */
	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
					  (struct r600_resource*)rctx->trace_buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_CP_DMA);

	rctx->trace_id++;
	radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
	radeon_emit(cs, rctx->trace_buf->gpu_address);
	radeon_emit(cs, rctx->trace_buf->gpu_address >> 32 | MEM_WRITE_32_BITS | MEM_WRITE_CONFIRM);
	radeon_emit(cs, rctx->trace_id);
	radeon_emit(cs, 0);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(rctx->trace_id));
}
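/* Load an atomic counter's current value from its backing buffer into the
 * matching GDS_APPEND_COUNT_* context register via SET_APPEND_CNT. */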
static void evergreen_emit_set_append_cnt(struct r600_context *rctx,
					  struct r600_shader_atomic *atomic,
					  struct r600_resource *resource,
					  uint32_t pkt_flags)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_READ,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;

	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4 - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;

	radeon_emit(cs, PKT3(PKT3_SET_APPEND_CNT, 2, 0) | pkt_flags);
	radeon_emit(cs, (reg_val << 16) | 0x3);
	radeon_emit(cs, dst_offset & 0xfffffffc);
	radeon_emit(cs, (dst_offset >> 32) & 0xff);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
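/* Flush an atomic counter back to memory with EVENT_WRITE_EOS, sourcing the
 * value from the GDS_APPEND_COUNT_* register selected by hw_idx. */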
static void evergreen_emit_event_write_eos(struct r600_context *rctx,
					   struct r600_shader_atomic *atomic,
					   struct r600_resource *resource,
					   uint32_t pkt_flags)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_WRITE,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4) >> 2;

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, (dst_offset) & 0xffffffff);
	radeon_emit(cs, (0 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, reg_val);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
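/* Cayman variant of the EOS flush: the counter is selected by its GDS offset
 * instead of a context register. */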
static void cayman_emit_event_write_eos(struct r600_context *rctx,
					struct r600_shader_atomic *atomic,
					struct r600_resource *resource,
					uint32_t pkt_flags)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_WRITE,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, (dst_offset) & 0xffffffff);
	radeon_emit(cs, (1 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, (atomic->hw_idx) | (1 << 16));
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
/* writes count from a buffer into GDS */
static void cayman_write_count_to_gds(struct r600_context *rctx,
				      struct r600_shader_atomic *atomic,
				      struct r600_resource *resource,
				      uint32_t pkt_flags)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_READ,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);

	radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0) | pkt_flags);
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, PKT3_CP_DMA_CP_SYNC | PKT3_CP_DMA_DST_SEL(1) | ((dst_offset >> 32) & 0xff)); // GDS
	radeon_emit(cs, atomic->hw_idx * 4);
	radeon_emit(cs, 0);
	radeon_emit(cs, PKT3_CP_DMA_CMD_DAS | 4);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
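/* Merge the atomic counter ranges of all active shader stages (or of the
 * compute shader alone) into combined_atomics and report the mask of
 * hardware counter slots in use. */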
void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
					      struct r600_pipe_shader *cs_shader,
					      struct r600_shader_atomic *combined_atomics,
					      uint8_t *atomic_used_mask_p)
{
	uint8_t atomic_used_mask = 0;
	int i, j, k;
	bool is_compute = cs_shader ? true : false;

	for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
		uint8_t num_atomic_stage;
		struct r600_pipe_shader *pshader;

		if (is_compute)
			pshader = cs_shader;
		else
			pshader = rctx->hw_shader_stages[i].shader;
		if (!pshader)
			continue;

		num_atomic_stage = pshader->shader.nhwatomic_ranges;
		if (!num_atomic_stage)
			continue;

		for (j = 0; j < num_atomic_stage; j++) {
			struct r600_shader_atomic *atomic = &pshader->shader.atomics[j];
			int natomics = atomic->end - atomic->start + 1;

			for (k = 0; k < natomics; k++) {
				/* seen this in a previous stage */
				if (atomic_used_mask & (1u << (atomic->hw_idx + k)))
					continue;

				combined_atomics[atomic->hw_idx + k].hw_idx = atomic->hw_idx + k;
				combined_atomics[atomic->hw_idx + k].buffer_id = atomic->buffer_id;
				combined_atomics[atomic->hw_idx + k].start = atomic->start + k;
				combined_atomics[atomic->hw_idx + k].end = combined_atomics[atomic->hw_idx + k].start + 1;
				atomic_used_mask |= (1u << (atomic->hw_idx + k));
			}
		}
	}
	*atomic_used_mask_p = atomic_used_mask;
}
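/* Before a draw or dispatch, load every used atomic counter from its backing
 * buffer into the hardware (GDS on Cayman, append-count registers on
 * Evergreen). */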
void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
					bool is_compute,
					struct r600_shader_atomic *combined_atomics,
					uint8_t atomic_used_mask)
{
	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
	unsigned pkt_flags = 0;
	uint32_t mask;

	if (is_compute)
		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	mask = atomic_used_mask;

	while (mask) {
		unsigned atomic_index = u_bit_scan(&mask);
		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);

		if (rctx->b.chip_class == CAYMAN)
			cayman_write_count_to_gds(rctx, atomic, resource, pkt_flags);
		else
			evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
	}
}
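/* After a draw or dispatch, write every used atomic counter back to its
 * buffer, then fence on append_fence and wait so the counters are not reused
 * before the writes have landed. */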
void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
				       bool is_compute,
				       struct r600_shader_atomic *combined_atomics,
				       uint8_t *atomic_used_mask_p)
{
	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
	uint32_t pkt_flags = 0;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t mask;
	uint64_t dst_offset;
	unsigned reloc;

	if (is_compute)
		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	mask = *atomic_used_mask_p;
	if (!mask)
		return;

	while (mask) {
		unsigned atomic_index = u_bit_scan(&mask);
		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);

		if (rctx->b.chip_class == CAYMAN)
			cayman_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
		else
			evergreen_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
	}

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	++rctx->append_fence_id;
	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
					  r600_resource(rctx->append_fence),
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SHADER_RW_BUFFER);
	dst_offset = r600_resource(rctx->append_fence)->gpu_address;
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, (2 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, rctx->append_fence_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0) | pkt_flags);
	radeon_emit(cs, WAIT_REG_MEM_GEQUAL | WAIT_REG_MEM_MEMORY | (1 << 8));
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, rctx->append_fence_id);
	radeon_emit(cs, 0xffffffff);
	radeon_emit(cs, 0xa);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}