r300g: rework vertex format fallback
[mesa.git] / src / gallium / drivers / r300 / r300_state.c
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24 #include "draw/draw_context.h"
25
26 #include "util/u_framebuffer.h"
27 #include "util/u_math.h"
28 #include "util/u_mm.h"
29 #include "util/u_memory.h"
30 #include "util/u_pack_color.h"
31
32 #include "tgsi/tgsi_parse.h"
33
34 #include "pipe/p_config.h"
35
36 #include "r300_cb.h"
37 #include "r300_context.h"
38 #include "r300_emit.h"
39 #include "r300_reg.h"
40 #include "r300_screen.h"
41 #include "r300_screen_buffer.h"
42 #include "r300_state_inlines.h"
43 #include "r300_fs.h"
44 #include "r300_texture.h"
45 #include "r300_vs.h"
46 #include "r300_winsys.h"
47 #include "r300_hyperz.h"
48
49 /* r300_state: Functions used to initialize the state context by translating
50 * Gallium state objects into semi-native r300 state objects. */
51
52 #define UPDATE_STATE(cso, atom) \
53 if (cso != atom.state) { \
54 atom.state = cso; \
55 r300_mark_atom_dirty(r300, &(atom)); \
56 }
57
58 static boolean blend_discard_if_src_alpha_0(unsigned srcRGB, unsigned srcA,
59 unsigned dstRGB, unsigned dstA)
60 {
61 /* If the blend equation is ADD or REVERSE_SUBTRACT,
62 * SRC_ALPHA == 0, and the following state is set, the colorbuffer
63 * will not be changed.
64 * Notice that the dst factors are the src factors inverted. */
65 return (srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
66 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
67 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
68 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
69 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
70 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
71 srcA == PIPE_BLENDFACTOR_ZERO) &&
72 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
73 dstRGB == PIPE_BLENDFACTOR_ONE) &&
74 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
75 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
76 dstA == PIPE_BLENDFACTOR_ONE);
77 }
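/* Illustrative arithmetic for the helper above (not from the original
 * source): take the common srcRGB = SRC_ALPHA, dstRGB = INV_SRC_ALPHA,
 * srcA = SRC_ALPHA, dstA = INV_SRC_ALPHA blend and a fragment with alpha 0.
 * Then:
 *     RGB = src * 0 + dst * (1 - 0) = dst
 *     A   = src * 0 + dst * (1 - 0) = dst
 * The colorbuffer keeps its old value, so the pixel may be discarded, which
 * is what R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0 asks the hardware to do. */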
78
79 static boolean blend_discard_if_src_alpha_1(unsigned srcRGB, unsigned srcA,
80 unsigned dstRGB, unsigned dstA)
81 {
82 /* If the blend equation is ADD or REVERSE_SUBTRACT,
83 * SRC_ALPHA == 1, and the following state is set, the colorbuffer
84 * will not be changed.
85 * Notice that the dst factors are the src factors inverted. */
86 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
87 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
88 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
89 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
90 srcA == PIPE_BLENDFACTOR_ZERO) &&
91 (dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
92 dstRGB == PIPE_BLENDFACTOR_ONE) &&
93 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
94 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
95 dstA == PIPE_BLENDFACTOR_ONE);
96 }
97
98 static boolean blend_discard_if_src_color_0(unsigned srcRGB, unsigned srcA,
99 unsigned dstRGB, unsigned dstA)
100 {
101 /* If the blend equation is ADD or REVERSE_SUBTRACT,
102 * SRC_COLOR == (0,0,0), and the following state is set, the colorbuffer
103 * will not be changed.
104 * Notice that the dst factors are the src factors inverted. */
105 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
106 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
107 (srcA == PIPE_BLENDFACTOR_ZERO) &&
108 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
109 dstRGB == PIPE_BLENDFACTOR_ONE) &&
110 (dstA == PIPE_BLENDFACTOR_ONE);
111 }
112
113 static boolean blend_discard_if_src_color_1(unsigned srcRGB, unsigned srcA,
114 unsigned dstRGB, unsigned dstA)
115 {
116 /* If the blend equation is ADD or REVERSE_SUBTRACT,
117 * SRC_COLOR == (1,1,1), and the following state is set, the colorbuffer
118 * will not be changed.
119 * Notice that the dst factors are the src factors inverted. */
120 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
121 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
122 (srcA == PIPE_BLENDFACTOR_ZERO) &&
123 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
124 dstRGB == PIPE_BLENDFACTOR_ONE) &&
125 (dstA == PIPE_BLENDFACTOR_ONE);
126 }
127
128 static boolean blend_discard_if_src_alpha_color_0(unsigned srcRGB, unsigned srcA,
129 unsigned dstRGB, unsigned dstA)
130 {
131 /* If the blend equation is ADD or REVERSE_SUBTRACT,
132 * SRC_ALPHA_COLOR == (0,0,0,0), and the following state is set,
133 * the colorbuffer will not be changed.
134 * Notice that the dst factors are the src factors inverted. */
135 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
136 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
137 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
138 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
139 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
140 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
141 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
142 srcA == PIPE_BLENDFACTOR_ZERO) &&
143 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
144 dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
145 dstRGB == PIPE_BLENDFACTOR_ONE) &&
146 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
147 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
148 dstA == PIPE_BLENDFACTOR_ONE);
149 }
150
151 static boolean blend_discard_if_src_alpha_color_1(unsigned srcRGB, unsigned srcA,
152 unsigned dstRGB, unsigned dstA)
153 {
154 /* If the blend equation is ADD or REVERSE_SUBTRACT,
155 * SRC_ALPHA_COLOR == (1,1,1,1), and the following state is set,
156 * the colorbuffer will not be changed.
157 * Notice that the dst factors are the src factors inverted. */
158 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
159 srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
160 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
161 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
162 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
163 srcA == PIPE_BLENDFACTOR_ZERO) &&
164 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
165 dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
166 dstRGB == PIPE_BLENDFACTOR_ONE) &&
167 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
168 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
169 dstA == PIPE_BLENDFACTOR_ONE);
170 }
171
172 static unsigned bgra_cmask(unsigned mask)
173 {
174 /* Gallium uses RGBA color ordering while R300 expects BGRA. */
175
176 return ((mask & PIPE_MASK_R) << 2) |
177 ((mask & PIPE_MASK_B) >> 2) |
178 (mask & (PIPE_MASK_G | PIPE_MASK_A));
179 }
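/* Example of the swizzle above, assuming the usual Gallium bit values
 * PIPE_MASK_R = 0x1, PIPE_MASK_G = 0x2, PIPE_MASK_B = 0x4, PIPE_MASK_A = 0x8:
 * a colormask of PIPE_MASK_R | PIPE_MASK_G (0x3) becomes
 *     (0x1 << 2) | (0x0 >> 2) | 0x2 = 0x6,
 * i.e. red moves into the position the hardware reserves for blue while
 * green and alpha stay where they are. */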
180
181 /* Create a new blend state based on the CSO blend state.
182 *
183 * This encompasses alpha blending, logic/raster ops, and blend dithering. */
184 static void* r300_create_blend_state(struct pipe_context* pipe,
185 const struct pipe_blend_state* state)
186 {
187 struct r300_screen* r300screen = r300_screen(pipe->screen);
188 struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state);
189 uint32_t blend_control = 0; /* R300_RB3D_CBLEND: 0x4e04 */
190 uint32_t alpha_blend_control = 0; /* R300_RB3D_ABLEND: 0x4e08 */
191 uint32_t color_channel_mask = 0; /* R300_RB3D_COLOR_CHANNEL_MASK: 0x4e0c */
192 uint32_t rop = 0; /* R300_RB3D_ROPCNTL: 0x4e18 */
193 uint32_t dither = 0; /* R300_RB3D_DITHER_CTL: 0x4e50 */
194 CB_LOCALS;
195
196 if (state->rt[0].blend_enable)
197 {
198 unsigned eqRGB = state->rt[0].rgb_func;
199 unsigned srcRGB = state->rt[0].rgb_src_factor;
200 unsigned dstRGB = state->rt[0].rgb_dst_factor;
201
202 unsigned eqA = state->rt[0].alpha_func;
203 unsigned srcA = state->rt[0].alpha_src_factor;
204 unsigned dstA = state->rt[0].alpha_dst_factor;
205
206 /* despite the name, ALPHA_BLEND_ENABLE has nothing to do with alpha,
207 * this is just the crappy D3D naming */
208 blend_control = R300_ALPHA_BLEND_ENABLE |
209 r300_translate_blend_function(eqRGB) |
210 ( r300_translate_blend_factor(srcRGB) << R300_SRC_BLEND_SHIFT) |
211 ( r300_translate_blend_factor(dstRGB) << R300_DST_BLEND_SHIFT);
212
213 /* Optimization: some operations do not require the destination color.
214 *
215 * When SRC_ALPHA_SATURATE is used, colorbuffer reads must be enabled,
216 * otherwise blending gives incorrect results. It seems to be
217 * a hardware bug. */
218 if (eqRGB == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MIN ||
219 eqRGB == PIPE_BLEND_MAX || eqA == PIPE_BLEND_MAX ||
220 dstRGB != PIPE_BLENDFACTOR_ZERO ||
221 dstA != PIPE_BLENDFACTOR_ZERO ||
222 srcRGB == PIPE_BLENDFACTOR_DST_COLOR ||
223 srcRGB == PIPE_BLENDFACTOR_DST_ALPHA ||
224 srcRGB == PIPE_BLENDFACTOR_INV_DST_COLOR ||
225 srcRGB == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
226 srcA == PIPE_BLENDFACTOR_DST_COLOR ||
227 srcA == PIPE_BLENDFACTOR_DST_ALPHA ||
228 srcA == PIPE_BLENDFACTOR_INV_DST_COLOR ||
229 srcA == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
230 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE) {
231 /* Enable reading from the colorbuffer. */
232 blend_control |= R300_READ_ENABLE;
233
234 if (r300screen->caps.is_r500) {
235 /* Optimization: Depending on incoming pixels, we can
236 * conditionally disable the reading in hardware... */
237 if (eqRGB != PIPE_BLEND_MIN && eqA != PIPE_BLEND_MIN &&
238 eqRGB != PIPE_BLEND_MAX && eqA != PIPE_BLEND_MAX) {
239 /* Disable reading if SRC_ALPHA == 0. */
240 if ((dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
241 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
242 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
243 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
244 dstA == PIPE_BLENDFACTOR_ZERO)) {
245 blend_control |= R500_SRC_ALPHA_0_NO_READ;
246 }
247
248 /* Disable reading if SRC_ALPHA == 1. */
249 if ((dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
250 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
251 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
252 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
253 dstA == PIPE_BLENDFACTOR_ZERO)) {
254 blend_control |= R500_SRC_ALPHA_1_NO_READ;
255 }
256 }
257 }
258 }
259
260 /* Optimization: discard pixels which don't change the colorbuffer.
261 *
262 * The code below is non-trivial and some math is involved.
263 *
264 * Discarding pixels must be disabled when FP16 AA is enabled.
265 * This is a hardware bug. Also, this implementation wouldn't work
266 * with FP blending enabled and equation clamping disabled.
267 *
268 * Equations other than ADD are rarely used and therefore won't be
269 * optimized. */
270 if ((eqRGB == PIPE_BLEND_ADD || eqRGB == PIPE_BLEND_REVERSE_SUBTRACT) &&
271 (eqA == PIPE_BLEND_ADD || eqA == PIPE_BLEND_REVERSE_SUBTRACT)) {
272 /* ADD: X+Y
273 * REVERSE_SUBTRACT: Y-X
274 *
275 * The idea is:
276 * If X = src*srcFactor = 0 and Y = dst*dstFactor = 1,
277 * then CB will not be changed.
278 *
279 * Given the srcFactor and dstFactor variables, we can derive
280 * what src and dst should be equal to and discard appropriate
281 * pixels.
282 */
283 if (blend_discard_if_src_alpha_0(srcRGB, srcA, dstRGB, dstA)) {
284 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0;
285 } else if (blend_discard_if_src_alpha_1(srcRGB, srcA,
286 dstRGB, dstA)) {
287 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1;
288 } else if (blend_discard_if_src_color_0(srcRGB, srcA,
289 dstRGB, dstA)) {
290 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_0;
291 } else if (blend_discard_if_src_color_1(srcRGB, srcA,
292 dstRGB, dstA)) {
293 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_1;
294 } else if (blend_discard_if_src_alpha_color_0(srcRGB, srcA,
295 dstRGB, dstA)) {
296 blend_control |=
297 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0;
298 } else if (blend_discard_if_src_alpha_color_1(srcRGB, srcA,
299 dstRGB, dstA)) {
300 blend_control |=
301 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1;
302 }
303 }
304
305 /* separate alpha */
306 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
307 blend_control |= R300_SEPARATE_ALPHA_ENABLE;
308 alpha_blend_control =
309 r300_translate_blend_function(eqA) |
310 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
311 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
312 }
313 }
314
315 /* PIPE_LOGICOP_* don't need to be translated, fortunately. */
316 if (state->logicop_enable) {
317 rop = R300_RB3D_ROPCNTL_ROP_ENABLE |
318 (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT;
319 }
320
321 /* Color channel masks for all MRTs. */
322 color_channel_mask = bgra_cmask(state->rt[0].colormask);
323 if (r300screen->caps.is_r500 && state->independent_blend_enable) {
324 if (state->rt[1].blend_enable) {
325 color_channel_mask |= bgra_cmask(state->rt[1].colormask) << 4;
326 }
327 if (state->rt[2].blend_enable) {
328 color_channel_mask |= bgra_cmask(state->rt[2].colormask) << 8;
329 }
330 if (state->rt[3].blend_enable) {
331 color_channel_mask |= bgra_cmask(state->rt[3].colormask) << 12;
332 }
333 }
334
335 /* Neither fglrx nor classic r300 ever set this, regardless of dithering
336 * state. Since it's an optional implementation detail, we can leave it
337 * out and never dither.
338 *
339 * This could be revisited if we ever get quality or conformance hints.
340 *
341 if (state->dither) {
342 dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT |
343 R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT;
344 }
345 */
346
347 /* Build a command buffer. */
348 BEGIN_CB(blend->cb, 8);
349 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
350 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
351 OUT_CB(blend_control);
352 OUT_CB(alpha_blend_control);
353 OUT_CB(color_channel_mask);
354 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
355 END_CB;
356
357 /* The same as above, but with no colorbuffer reads and writes. */
358 BEGIN_CB(blend->cb_no_readwrite, 8);
359 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
360 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
361 OUT_CB(0);
362 OUT_CB(0);
363 OUT_CB(0);
364 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
365 END_CB;
366
367 return (void*)blend;
368 }
369
370 /* Bind blend state. */
371 static void r300_bind_blend_state(struct pipe_context* pipe,
372 void* state)
373 {
374 struct r300_context* r300 = r300_context(pipe);
375
376 UPDATE_STATE(state, r300->blend_state);
377 }
378
379 /* Free blend state. */
380 static void r300_delete_blend_state(struct pipe_context* pipe,
381 void* state)
382 {
383 FREE(state);
384 }
385
386 /* Convert a float to a 10-bit integer, clamped to [0, 1023]. */
387 static unsigned float_to_fixed10(float f)
388 {
389 return CLAMP((unsigned)(f * 1023.9f), 0, 1023);
390 }
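/* For example, 0.5f maps to (unsigned)(0.5f * 1023.9f) = 511, and any input
 * >= 1.0f saturates to 1023 because of the CLAMP. */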
391
392 /* Set blend color.
393 * Setup both R300 and R500 registers, figure out later which one to write. */
394 static void r300_set_blend_color(struct pipe_context* pipe,
395 const struct pipe_blend_color* color)
396 {
397 struct r300_context* r300 = r300_context(pipe);
398 struct r300_blend_color_state* state =
399 (struct r300_blend_color_state*)r300->blend_color_state.state;
400 CB_LOCALS;
401
402 if (r300->screen->caps.is_r500) {
403 /* XXX if FP16 blending is enabled, we should use the FP16 format */
404 BEGIN_CB(state->cb, 3);
405 OUT_CB_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
406 OUT_CB(float_to_fixed10(color->color[0]) |
407 (float_to_fixed10(color->color[3]) << 16));
408 OUT_CB(float_to_fixed10(color->color[2]) |
409 (float_to_fixed10(color->color[1]) << 16));
410 END_CB;
411 } else {
412 union util_color uc;
413 util_pack_color(color->color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
414
415 BEGIN_CB(state->cb, 2);
416 OUT_CB_REG(R300_RB3D_BLEND_COLOR, uc.ui);
417 END_CB;
418 }
419
420 r300_mark_atom_dirty(r300, &r300->blend_color_state);
421 }
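/* Layout example with illustrative values: for a blend color of
 * (R, G, B, A) = (1.0, 0.5, 0.0, 1.0), the r500 path writes
 *     1023 | (1023 << 16)  into the AR constant-color register and
 *     0    | (511  << 16)  into the following GB register (10-bit values),
 * while the r300 path packs the same color into a single B8G8R8A8 dword
 * with util_pack_color(). */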
422
423 static void r300_set_clip_state(struct pipe_context* pipe,
424 const struct pipe_clip_state* state)
425 {
426 struct r300_context* r300 = r300_context(pipe);
427 struct r300_clip_state *clip =
428 (struct r300_clip_state*)r300->clip_state.state;
429 CB_LOCALS;
430
431 clip->clip = *state;
432
433 if (r300->screen->caps.has_tcl) {
434 r300->clip_state.size = 2 + !!state->nr * 3 + state->nr * 4;
435
436 BEGIN_CB(clip->cb, r300->clip_state.size);
437 if (state->nr) {
438 OUT_CB_REG(R300_VAP_PVS_VECTOR_INDX_REG,
439 (r300->screen->caps.is_r500 ?
440 R500_PVS_UCP_START : R300_PVS_UCP_START));
441 OUT_CB_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, state->nr * 4);
442 OUT_CB_TABLE(state->ucp, state->nr * 4);
443 }
444 OUT_CB_REG(R300_VAP_CLIP_CNTL, ((1 << state->nr) - 1) |
445 R300_PS_UCP_MODE_CLIP_AS_TRIFAN);
446 END_CB;
447
448 r300_mark_atom_dirty(r300, &r300->clip_state);
449 } else {
450 draw_set_clip_state(r300->draw, state);
451 }
452 }
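/* Sizing example for the atom above: with two user clip planes (nr == 2)
 * the size is 2 + 3 + 2 * 4 = 13 dwords: 2 for the VAP_CLIP_CNTL write,
 * 3 for the PVS_VECTOR_INDX_REG write plus the PVS_UPLOAD_DATA header,
 * and 8 for the two clip-plane vectors. With no clip planes it is just 2. */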
453
454 static void
455 r300_set_sample_mask(struct pipe_context *pipe,
456 unsigned sample_mask)
457 {
458 }
459
460
461 /* Create a new depth, stencil, and alpha state based on the CSO dsa state.
462 *
463 * This contains the depth buffer, stencil buffer, alpha test, and such.
464 * On the Radeon, depth and stencil buffer setup are intertwined, which is
465 * the reason for some of the strange-looking assignments across registers. */
466 static void*
467 r300_create_dsa_state(struct pipe_context* pipe,
468 const struct pipe_depth_stencil_alpha_state* state)
469 {
470 struct r300_capabilities *caps = &r300_screen(pipe->screen)->caps;
471 struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state);
472 CB_LOCALS;
473
474 dsa->dsa = *state;
475
476 /* Depth test setup. The Z write mask is set up separately from the depth-test enable so it can be used for decompression flushes. */
477 if (state->depth.writemask) {
478 dsa->z_buffer_control |= R300_Z_WRITE_ENABLE;
479 }
480
481 if (state->depth.enabled) {
482 dsa->z_buffer_control |= R300_Z_ENABLE;
483
484 dsa->z_stencil_control |=
485 (r300_translate_depth_stencil_function(state->depth.func) <<
486 R300_Z_FUNC_SHIFT);
487 }
488
489 /* Stencil buffer setup. */
490 if (state->stencil[0].enabled) {
491 dsa->z_buffer_control |= R300_STENCIL_ENABLE;
492 dsa->z_stencil_control |=
493 (r300_translate_depth_stencil_function(state->stencil[0].func) <<
494 R300_S_FRONT_FUNC_SHIFT) |
495 (r300_translate_stencil_op(state->stencil[0].fail_op) <<
496 R300_S_FRONT_SFAIL_OP_SHIFT) |
497 (r300_translate_stencil_op(state->stencil[0].zpass_op) <<
498 R300_S_FRONT_ZPASS_OP_SHIFT) |
499 (r300_translate_stencil_op(state->stencil[0].zfail_op) <<
500 R300_S_FRONT_ZFAIL_OP_SHIFT);
501
502 dsa->stencil_ref_mask =
503 (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) |
504 (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT);
505
506 if (state->stencil[1].enabled) {
507 dsa->two_sided = TRUE;
508
509 dsa->z_buffer_control |= R300_STENCIL_FRONT_BACK;
510 dsa->z_stencil_control |=
511 (r300_translate_depth_stencil_function(state->stencil[1].func) <<
512 R300_S_BACK_FUNC_SHIFT) |
513 (r300_translate_stencil_op(state->stencil[1].fail_op) <<
514 R300_S_BACK_SFAIL_OP_SHIFT) |
515 (r300_translate_stencil_op(state->stencil[1].zpass_op) <<
516 R300_S_BACK_ZPASS_OP_SHIFT) |
517 (r300_translate_stencil_op(state->stencil[1].zfail_op) <<
518 R300_S_BACK_ZFAIL_OP_SHIFT);
519
520 dsa->stencil_ref_bf =
521 (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) |
522 (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT);
523
524 if (caps->is_r500) {
525 dsa->z_buffer_control |= R500_STENCIL_REFMASK_FRONT_BACK;
526 } else {
527 dsa->two_sided_stencil_ref =
528 (state->stencil[0].valuemask != state->stencil[1].valuemask ||
529 state->stencil[0].writemask != state->stencil[1].writemask);
530 }
531 }
532 }
533
534 /* Alpha test setup. */
535 if (state->alpha.enabled) {
536 dsa->alpha_function =
537 r300_translate_alpha_function(state->alpha.func) |
538 R300_FG_ALPHA_FUNC_ENABLE;
539
540 /* We could use a 10-bit alpha ref, but who needs that? */
541 dsa->alpha_function |= float_to_ubyte(state->alpha.ref_value);
542
543 if (caps->is_r500)
544 dsa->alpha_function |= R500_FG_ALPHA_FUNC_8BIT;
545 }
546
547 BEGIN_CB(&dsa->cb_begin, 8);
548 OUT_CB_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);
549 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
550 OUT_CB(dsa->z_buffer_control);
551 OUT_CB(dsa->z_stencil_control);
552 OUT_CB(dsa->stencil_ref_mask);
553 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf);
554 END_CB;
555
556 BEGIN_CB(dsa->cb_no_readwrite, 8);
557 OUT_CB_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);
558 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
559 OUT_CB(0);
560 OUT_CB(0);
561 OUT_CB(0);
562 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, 0);
563 END_CB;
564
565 return (void*)dsa;
566 }
567
568 static void r300_dsa_inject_stencilref(struct r300_context *r300)
569 {
570 struct r300_dsa_state *dsa =
571 (struct r300_dsa_state*)r300->dsa_state.state;
572
573 if (!dsa)
574 return;
575
576 dsa->stencil_ref_mask =
577 (dsa->stencil_ref_mask & ~R300_STENCILREF_MASK) |
578 r300->stencil_ref.ref_value[0];
579 dsa->stencil_ref_bf =
580 (dsa->stencil_ref_bf & ~R300_STENCILREF_MASK) |
581 r300->stencil_ref.ref_value[1];
582 }
583
584 /* Bind DSA state. */
585 static void r300_bind_dsa_state(struct pipe_context* pipe,
586 void* state)
587 {
588 struct r300_context* r300 = r300_context(pipe);
589
590 if (!state) {
591 return;
592 }
593
594 UPDATE_STATE(state, r300->dsa_state);
595
596 r300_mark_atom_dirty(r300, &r300->hyperz_state); /* Will be updated before the emission. */
597 r300_dsa_inject_stencilref(r300);
598 }
599
600 /* Free DSA state. */
601 static void r300_delete_dsa_state(struct pipe_context* pipe,
602 void* state)
603 {
604 FREE(state);
605 }
606
607 static void r300_set_stencil_ref(struct pipe_context* pipe,
608 const struct pipe_stencil_ref* sr)
609 {
610 struct r300_context* r300 = r300_context(pipe);
611
612 r300->stencil_ref = *sr;
613
614 r300_dsa_inject_stencilref(r300);
615 r300_mark_atom_dirty(r300, &r300->dsa_state);
616 }
617
618 static void r300_tex_set_tiling_flags(struct r300_context *r300,
619 struct r300_texture *tex, unsigned level)
620 {
621 /* Check if the macrotile flag needs to be changed.
622 * Skip changing the flags otherwise. */
623 if (tex->desc.macrotile[tex->surface_level] !=
624 tex->desc.macrotile[level]) {
625 /* Tiling determines how DRM treats the buffer data.
626 * We must flush CS when changing it if the buffer is referenced. */
627 if (r300->rws->cs_is_buffer_referenced(r300->cs,
628 tex->cs_buffer, R300_REF_CS))
629 r300->context.flush(&r300->context, 0, NULL);
630
631 r300->rws->buffer_set_tiling(r300->rws, tex->buffer,
632 tex->desc.microtile, tex->desc.macrotile[level],
633 tex->desc.stride_in_bytes[0]);
634
635 tex->surface_level = level;
636 }
637 }
638
639 /* This switcheroo is needed just because of goddamned MACRO_SWITCH. */
640 static void r300_fb_set_tiling_flags(struct r300_context *r300,
641 const struct pipe_framebuffer_state *state)
642 {
643 unsigned i;
644
645 /* Set tiling flags for new surfaces. */
646 for (i = 0; i < state->nr_cbufs; i++) {
647 r300_tex_set_tiling_flags(r300,
648 r300_texture(state->cbufs[i]->texture),
649 state->cbufs[i]->u.tex.level);
650 }
651 if (state->zsbuf) {
652 r300_tex_set_tiling_flags(r300,
653 r300_texture(state->zsbuf->texture),
654 state->zsbuf->u.tex.level);
655 }
656 }
657
658 static void r300_print_fb_surf_info(struct pipe_surface *surf, unsigned index,
659 const char *binding)
660 {
661 struct pipe_resource *tex = surf->texture;
662 struct r300_texture *rtex = r300_texture(tex);
663
664 fprintf(stderr,
665 "r300: %s[%i] Dim: %ix%i, Firstlayer: %i, "
666 "Lastlayer: %i, Level: %i, Format: %s\n"
667
668 "r300: TEX: Macro: %s, Micro: %s, Pitch: %i, "
669 "Dim: %ix%ix%i, LastLevel: %i, Format: %s\n",
670
671 binding, index, surf->width, surf->height,
672 surf->u.tex.first_layer, surf->u.tex.last_layer, surf->u.tex.level,
673 util_format_short_name(surf->format),
674
675 rtex->desc.macrotile[0] ? "YES" : " NO",
676 rtex->desc.microtile ? "YES" : " NO",
677 rtex->desc.stride_in_pixels[0],
678 tex->width0, tex->height0, tex->depth0,
679 tex->last_level, util_format_short_name(tex->format));
680 }
681
682 void r300_mark_fb_state_dirty(struct r300_context *r300,
683 enum r300_fb_state_change change)
684 {
685 struct pipe_framebuffer_state *state = r300->fb_state.state;
686 boolean can_hyperz = r300->rws->get_value(r300->rws, R300_CAN_HYPERZ);
687
688 r300_mark_atom_dirty(r300, &r300->gpu_flush);
689 r300_mark_atom_dirty(r300, &r300->fb_state);
690
691 /* What is marked as dirty depends on the enum r300_fb_state_change. */
692 if (change == R300_CHANGED_FB_STATE) {
693 r300_mark_atom_dirty(r300, &r300->aa_state);
694 }
695
696 if (change == R300_CHANGED_FB_STATE ||
697 change == R300_CHANGED_HYPERZ_FLAG) {
698 r300_mark_atom_dirty(r300, &r300->hyperz_state);
699 }
700
701 if (change == R300_CHANGED_FB_STATE ||
702 change == R300_CHANGED_MULTIWRITE) {
703 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined);
704 }
705
706 /* Now compute the fb_state atom size. */
707 r300->fb_state.size = 2 + (8 * state->nr_cbufs);
708
709 if (r300->cbzb_clear)
710 r300->fb_state.size += 10;
711 else if (state->zsbuf) {
712 r300->fb_state.size += 10;
713 if (can_hyperz)
714 r300->fb_state.size += r300->screen->caps.hiz_ram ? 8 : 4;
715 }
716
717 /* The size of the rest of atoms stays the same. */
718 }
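/* To make the size computation above concrete: one colorbuffer plus a
 * zbuffer, with HyperZ available on a chip that has HiZ RAM, gives
 *     2 + 8 * 1 + 10 + 8 = 28
 * dwords for the fb_state atom; the same setup without HyperZ is 20. */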
719
720 static void
721 r300_set_framebuffer_state(struct pipe_context* pipe,
722 const struct pipe_framebuffer_state* state)
723 {
724 struct r300_context* r300 = r300_context(pipe);
725 struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
726 struct pipe_framebuffer_state *old_state = r300->fb_state.state;
727 boolean can_hyperz = r300->rws->get_value(r300->rws, R300_CAN_HYPERZ);
728 unsigned max_width, max_height, i;
729 uint32_t zbuffer_bpp = 0;
730
731 if (r300->screen->caps.is_r500) {
732 max_width = max_height = 4096;
733 } else if (r300->screen->caps.is_r400) {
734 max_width = max_height = 4021;
735 } else {
736 max_width = max_height = 2560;
737 }
738
739 if (state->width > max_width || state->height > max_height) {
740 fprintf(stderr, "r300: Implementation error: Render targets are too "
741 "big in %s, refusing to bind framebuffer state!\n", __FUNCTION__);
742 return;
743 }
744
745 if (old_state->zsbuf && r300->zmask_in_use && !r300->zmask_locked) {
746 /* There is a zmask in use, what are we gonna do? */
747 if (state->zsbuf) {
748 if (!pipe_surface_equal(old_state->zsbuf, state->zsbuf)) {
749 /* Decompress the currently bound zbuffer before we bind another one. */
750 r300_decompress_zmask(r300);
751 }
752 } else {
753 /* We don't bind another zbuffer, so lock the current one. */
754 r300->zmask_locked = TRUE;
755 pipe_surface_reference(&r300->locked_zbuffer, old_state->zsbuf);
756 }
757 } else if (r300->zmask_locked && r300->locked_zbuffer) {
758 /* We have a locked zbuffer now, what are we gonna do? */
759 if (state->zsbuf) {
760 if (!pipe_surface_equal(r300->locked_zbuffer, state->zsbuf)) {
761 /* We are binding some other zbuffer, so decompress the locked one,
762 * it gets unlocked automatically. */
763 r300_decompress_zmask_locked_unsafe(r300);
764 } else {
765 /* We are binding the locked zbuffer again, so unlock it. */
766 r300->zmask_locked = FALSE;
767 }
768 }
769 }
770
771 /* If nr_cbufs is changed from zero to non-zero or vice versa... */
772 if (!!old_state->nr_cbufs != !!state->nr_cbufs) {
773 r300_mark_atom_dirty(r300, &r300->blend_state);
774 }
775 /* If zsbuf is set from NULL to non-NULL or vice versa.. */
776 if (!!old_state->zsbuf != !!state->zsbuf) {
777 r300_mark_atom_dirty(r300, &r300->dsa_state);
778 }
779
780 /* The tiling flags are dependent on the surface miplevel, unfortunately. */
781 r300_fb_set_tiling_flags(r300, state);
782
783 util_copy_framebuffer_state(r300->fb_state.state, state);
784
785 if (!r300->zmask_locked) {
786 pipe_surface_reference(&r300->locked_zbuffer, NULL);
787 }
788
789 r300_mark_fb_state_dirty(r300, R300_CHANGED_FB_STATE);
790 r300->validate_buffers = TRUE;
791
792 if (state->zsbuf) {
793 switch (util_format_get_blocksize(state->zsbuf->texture->format)) {
794 case 2:
795 zbuffer_bpp = 16;
796 break;
797 case 4:
798 zbuffer_bpp = 24;
799 break;
800 }
801
802 /* Setup Hyper-Z. */
803 if (can_hyperz) {
804 struct r300_surface *zs_surf = r300_surface(state->zsbuf);
805 struct r300_texture *tex = r300_texture(zs_surf->base.texture);
806 int level = zs_surf->base.u.tex.level;
807
808 /* work out whether we can support hiz on this buffer */
809 r300_hiz_alloc_block(r300, zs_surf);
810
811 DBG(r300, DBG_HYPERZ,
812 "hyper-z features: hiz: %d @ %08x\n", tex->hiz_mem[level] ? 1 : 0,
813 tex->hiz_mem[level] ? tex->hiz_mem[level]->ofs : 0xdeadbeef);
814 }
815
816 /* Polygon offset depends on the zbuffer bit depth. */
817 if (r300->zbuffer_bpp != zbuffer_bpp) {
818 r300->zbuffer_bpp = zbuffer_bpp;
819
820 if (r300->polygon_offset_enabled)
821 r300_mark_atom_dirty(r300, &r300->rs_state);
822 }
823 }
824
825 /* Set up AA config. */
826 if (r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0)) {
827 if (state->nr_cbufs && state->cbufs[0]->texture->nr_samples > 1) {
828 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE;
829
830 switch (state->cbufs[0]->texture->nr_samples) {
831 case 2:
832 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2;
833 break;
834 case 3:
835 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_3;
836 break;
837 case 4:
838 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4;
839 break;
840 case 6:
841 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6;
842 break;
843 }
844 } else {
845 aa->aa_config = 0;
846 }
847 }
848
849 if (DBG_ON(r300, DBG_FB)) {
850 fprintf(stderr, "r300: set_framebuffer_state:\n");
851 for (i = 0; i < state->nr_cbufs; i++) {
852 r300_print_fb_surf_info(state->cbufs[i], i, "CB");
853 }
854 if (state->zsbuf) {
855 r300_print_fb_surf_info(state->zsbuf, 0, "ZB");
856 }
857 }
858 }
859
860 /* Create fragment shader state. */
861 static void* r300_create_fs_state(struct pipe_context* pipe,
862 const struct pipe_shader_state* shader)
863 {
864 struct r300_fragment_shader* fs = NULL;
865
866 fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader);
867
868 /* Copy state directly into shader. */
869 fs->state = *shader;
870 fs->state.tokens = tgsi_dup_tokens(shader->tokens);
871
872 return (void*)fs;
873 }
874
875 void r300_mark_fs_code_dirty(struct r300_context *r300)
876 {
877 struct r300_fragment_shader* fs = r300_fs(r300);
878
879 r300_mark_atom_dirty(r300, &r300->fs);
880 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
881 r300_mark_atom_dirty(r300, &r300->fs_constants);
882 r300->fs.size = fs->shader->cb_code_size;
883
884 if (r300->screen->caps.is_r500) {
885 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 7;
886 r300->fs_constants.size = fs->shader->externals_count * 4 + 3;
887 } else {
888 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 5;
889 r300->fs_constants.size = fs->shader->externals_count * 4 + 1;
890 }
891
892 ((struct r300_constant_buffer*)r300->fs_constants.state)->remap_table =
893 fs->shader->code.constants_remap_table;
894 }
895
896 /* Bind fragment shader state. */
897 static void r300_bind_fs_state(struct pipe_context* pipe, void* shader)
898 {
899 struct r300_context* r300 = r300_context(pipe);
900 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
901 struct pipe_framebuffer_state *fb = r300->fb_state.state;
902 boolean last_multi_write;
903
904 if (fs == NULL) {
905 r300->fs.state = NULL;
906 return;
907 }
908
909 last_multi_write = r300_fragment_shader_writes_all(r300_fs(r300));
910
911 r300->fs.state = fs;
912 r300_pick_fragment_shader(r300);
913 r300_mark_fs_code_dirty(r300);
914
915 if (fb->nr_cbufs > 1 &&
916 last_multi_write != r300_fragment_shader_writes_all(fs)) {
917 r300_mark_fb_state_dirty(r300, R300_CHANGED_MULTIWRITE);
918 }
919
920 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */
921 }
922
923 /* Delete fragment shader state. */
924 static void r300_delete_fs_state(struct pipe_context* pipe, void* shader)
925 {
926 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
927 struct r300_fragment_shader_code *tmp, *ptr = fs->first;
928
929 while (ptr) {
930 tmp = ptr;
931 ptr = ptr->next;
932 rc_constants_destroy(&tmp->code.constants);
933 FREE(tmp->cb_code);
934 FREE(tmp);
935 }
936 FREE((void*)fs->state.tokens);
937 FREE(shader);
938 }
939
940 static void r300_set_polygon_stipple(struct pipe_context* pipe,
941 const struct pipe_poly_stipple* state)
942 {
943 /* XXX no idea how to set this up, but not terribly important */
944 }
945
946 /* Create a new rasterizer state based on the CSO rasterizer state.
947 *
948 * This is a very large chunk of state, and covers most of the graphics
949 * backend (GB), geometry assembly (GA), and setup unit (SU) blocks.
950 *
951 * In a not entirely unironic sidenote, this state has nearly nothing to do
952 * with the actual block on the Radeon called the rasterizer (RS). */
953 static void* r300_create_rs_state(struct pipe_context* pipe,
954 const struct pipe_rasterizer_state* state)
955 {
956 struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state);
957 float psiz;
958 uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */
959 uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */
960 uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */
961 uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */
962 uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */
963 uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */
964 uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */
965 uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */
966 uint32_t polygon_mode; /* R300_GA_POLY_MODE: 0x4288 */
967 uint32_t clip_rule; /* R300_SC_CLIP_RULE: 0x43D0 */
968
969 /* Point sprites texture coordinates, 0: lower left, 1: upper right */
970 float point_texcoord_left = 0; /* R300_GA_POINT_S0: 0x4200 */
971 float point_texcoord_bottom = 0;/* R300_GA_POINT_T0: 0x4204 */
972 float point_texcoord_right = 1; /* R300_GA_POINT_S1: 0x4208 */
973 float point_texcoord_top = 0; /* R300_GA_POINT_T1: 0x420c */
974 CB_LOCALS;
975
976 /* Copy rasterizer state. */
977 rs->rs = *state;
978 rs->rs_draw = *state;
979
980 rs->rs.sprite_coord_enable = state->point_quad_rasterization *
981 state->sprite_coord_enable;
982
983 /* Override some states for Draw. */
984 rs->rs_draw.sprite_coord_enable = 0; /* We can do this in HW. */
985
986 #ifdef PIPE_ARCH_LITTLE_ENDIAN
987 vap_control_status = R300_VC_NO_SWAP;
988 #else
989 vap_control_status = R300_VC_32BIT_SWAP;
990 #endif
991
992 /* If no TCL engine is present, turn off the HW TCL. */
993 if (!r300_screen(pipe->screen)->caps.has_tcl) {
994 vap_control_status |= R300_VAP_TCL_BYPASS;
995 }
996
997 /* Point size width and height. */
998 point_size =
999 pack_float_16_6x(state->point_size) |
1000 (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT);
1001
1002 /* Point size clamping. */
1003 if (state->point_size_per_vertex) {
1004 /* Per-vertex point size.
1005 * Clamp to [0, max FB size] */
1006 psiz = pipe->screen->get_paramf(pipe->screen,
1007 PIPE_CAP_MAX_POINT_WIDTH);
1008 point_minmax =
1009 pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT;
1010 } else {
1011 /* We cannot disable the point-size vertex output,
1012 * so clamp it. */
1013 psiz = state->point_size;
1014 point_minmax =
1015 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1016 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1017 }
1018
1019 /* Line control. */
1020 line_control = pack_float_16_6x(state->line_width) |
1021 R300_GA_LINE_CNTL_END_TYPE_COMP;
1022
1023 /* Enable polygon mode */
1024 polygon_mode = 0;
1025 if (state->fill_front != PIPE_POLYGON_MODE_FILL ||
1026 state->fill_back != PIPE_POLYGON_MODE_FILL) {
1027 polygon_mode = R300_GA_POLY_MODE_DUAL;
1028 }
1029
1030 /* Front face */
1031 if (state->front_ccw)
1032 cull_mode = R300_FRONT_FACE_CCW;
1033 else
1034 cull_mode = R300_FRONT_FACE_CW;
1035
1036 /* Polygon offset */
1037 polygon_offset_enable = 0;
1038 if (util_get_offset(state, state->fill_front)) {
1039 polygon_offset_enable |= R300_FRONT_ENABLE;
1040 }
1041 if (util_get_offset(state, state->fill_back)) {
1042 polygon_offset_enable |= R300_BACK_ENABLE;
1043 }
1044
1045 rs->polygon_offset_enable = polygon_offset_enable != 0;
1046
1047 /* Polygon mode */
1048 if (polygon_mode) {
1049 polygon_mode |=
1050 r300_translate_polygon_mode_front(state->fill_front);
1051 polygon_mode |=
1052 r300_translate_polygon_mode_back(state->fill_back);
1053 }
1054
1055 if (state->cull_face & PIPE_FACE_FRONT) {
1056 cull_mode |= R300_CULL_FRONT;
1057 }
1058 if (state->cull_face & PIPE_FACE_BACK) {
1059 cull_mode |= R300_CULL_BACK;
1060 }
1061
1062 if (state->line_stipple_enable) {
1063 line_stipple_config =
1064 R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE |
1065 (fui((float)state->line_stipple_factor) &
1066 R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK);
1067 /* XXX this might need to be scaled up */
1068 line_stipple_value = state->line_stipple_pattern;
1069 } else {
1070 line_stipple_config = 0;
1071 line_stipple_value = 0;
1072 }
1073
1074 if (state->flatshade) {
1075 rs->color_control = R300_SHADE_MODEL_FLAT;
1076 } else {
1077 rs->color_control = R300_SHADE_MODEL_SMOOTH;
1078 }
1079
1080 clip_rule = state->scissor ? 0xAAAA : 0xFFFF;
1081
1082 /* Point sprites coord mode */
1083 if (rs->rs.sprite_coord_enable) {
1084 switch (state->sprite_coord_mode) {
1085 case PIPE_SPRITE_COORD_UPPER_LEFT:
1086 point_texcoord_top = 0.0f;
1087 point_texcoord_bottom = 1.0f;
1088 break;
1089 case PIPE_SPRITE_COORD_LOWER_LEFT:
1090 point_texcoord_top = 1.0f;
1091 point_texcoord_bottom = 0.0f;
1092 break;
1093 }
1094 }
1095
1096 /* Build the main command buffer. */
1097 BEGIN_CB(rs->cb_main, RS_STATE_MAIN_SIZE);
1098 OUT_CB_REG(R300_VAP_CNTL_STATUS, vap_control_status);
1099 OUT_CB_REG(R300_GA_POINT_SIZE, point_size);
1100 OUT_CB_REG_SEQ(R300_GA_POINT_MINMAX, 2);
1101 OUT_CB(point_minmax);
1102 OUT_CB(line_control);
1103 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE, 2);
1104 OUT_CB(polygon_offset_enable);
1105 rs->cull_mode_index = 9;
1106 OUT_CB(cull_mode);
1107 OUT_CB_REG(R300_GA_LINE_STIPPLE_CONFIG, line_stipple_config);
1108 OUT_CB_REG(R300_GA_LINE_STIPPLE_VALUE, line_stipple_value);
1109 OUT_CB_REG(R300_GA_POLY_MODE, polygon_mode);
1110 OUT_CB_REG(R300_SC_CLIP_RULE, clip_rule);
1111 OUT_CB_REG_SEQ(R300_GA_POINT_S0, 4);
1112 OUT_CB_32F(point_texcoord_left);
1113 OUT_CB_32F(point_texcoord_bottom);
1114 OUT_CB_32F(point_texcoord_right);
1115 OUT_CB_32F(point_texcoord_top);
1116 END_CB;
1117
1118 /* Build the two command buffers for polygon offset setup. */
1119 if (polygon_offset_enable) {
1120 float scale = state->offset_scale * 12;
1121 float offset = state->offset_units * 4;
1122
1123 BEGIN_CB(rs->cb_poly_offset_zb16, 5);
1124 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1125 OUT_CB_32F(scale);
1126 OUT_CB_32F(offset);
1127 OUT_CB_32F(scale);
1128 OUT_CB_32F(offset);
1129 END_CB;
1130
1131 offset = state->offset_units * 2;
1132
1133 BEGIN_CB(rs->cb_poly_offset_zb24, 5);
1134 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1135 OUT_CB_32F(scale);
1136 OUT_CB_32F(offset);
1137 OUT_CB_32F(scale);
1138 OUT_CB_32F(offset);
1139 END_CB;
1140 }
1141
1142 return (void*)rs;
1143 }
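/* Polygon offset example (illustrative numbers): with offset_scale = 1.0 and
 * offset_units = 2.0, both command buffers carry a slope scale of 12.0; the
 * zb16 buffer carries an offset of 8.0 and the zb24 buffer an offset of 4.0.
 * The emit code is expected to pick the buffer matching the bound zbuffer's
 * depth, which is why a zbuffer bpp change re-dirties the RS state in
 * r300_set_framebuffer_state(). */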
1144
1145 /* Bind rasterizer state. */
1146 static void r300_bind_rs_state(struct pipe_context* pipe, void* state)
1147 {
1148 struct r300_context* r300 = r300_context(pipe);
1149 struct r300_rs_state* rs = (struct r300_rs_state*)state;
1150 int last_sprite_coord_enable = r300->sprite_coord_enable;
1151 boolean last_two_sided_color = r300->two_sided_color;
1152
1153 if (r300->draw && rs) {
1154 draw_set_rasterizer_state(r300->draw, &rs->rs_draw, state);
1155 }
1156
1157 if (rs) {
1158 r300->polygon_offset_enabled = rs->polygon_offset_enable;
1159 r300->sprite_coord_enable = rs->rs.sprite_coord_enable;
1160 r300->two_sided_color = rs->rs.light_twoside;
1161 } else {
1162 r300->polygon_offset_enabled = FALSE;
1163 r300->sprite_coord_enable = 0;
1164 r300->two_sided_color = FALSE;
1165 }
1166
1167 UPDATE_STATE(state, r300->rs_state);
1168 r300->rs_state.size = RS_STATE_MAIN_SIZE + (r300->polygon_offset_enabled ? 5 : 0);
1169
1170 if (last_sprite_coord_enable != r300->sprite_coord_enable ||
1171 last_two_sided_color != r300->two_sided_color) {
1172 r300_mark_atom_dirty(r300, &r300->rs_block_state);
1173 }
1174 }
1175
1176 /* Free rasterizer state. */
1177 static void r300_delete_rs_state(struct pipe_context* pipe, void* state)
1178 {
1179 FREE(state);
1180 }
1181
1182 static void*
1183 r300_create_sampler_state(struct pipe_context* pipe,
1184 const struct pipe_sampler_state* state)
1185 {
1186 struct r300_context* r300 = r300_context(pipe);
1187 struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state);
1188 boolean is_r500 = r300->screen->caps.is_r500;
1189 int lod_bias;
1190
1191 sampler->state = *state;
1192
1193 /* r300 doesn't handle CLAMP and MIRROR_CLAMP correctly when either MAG
1194 * or MIN filter is NEAREST. Since texwrap produces same results
1195 * for CLAMP and CLAMP_TO_EDGE, we use them instead. */
1196 if (sampler->state.min_img_filter == PIPE_TEX_FILTER_NEAREST ||
1197 sampler->state.mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
1198 /* Wrap S. */
1199 if (sampler->state.wrap_s == PIPE_TEX_WRAP_CLAMP)
1200 sampler->state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1201 else if (sampler->state.wrap_s == PIPE_TEX_WRAP_MIRROR_CLAMP)
1202 sampler->state.wrap_s = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1203
1204 /* Wrap T. */
1205 if (sampler->state.wrap_t == PIPE_TEX_WRAP_CLAMP)
1206 sampler->state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1207 else if (sampler->state.wrap_t == PIPE_TEX_WRAP_MIRROR_CLAMP)
1208 sampler->state.wrap_t = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1209
1210 /* Wrap R. */
1211 if (sampler->state.wrap_r == PIPE_TEX_WRAP_CLAMP)
1212 sampler->state.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1213 else if (sampler->state.wrap_r == PIPE_TEX_WRAP_MIRROR_CLAMP)
1214 sampler->state.wrap_r = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1215 }
1216
1217 sampler->filter0 |=
1218 (r300_translate_wrap(sampler->state.wrap_s) << R300_TX_WRAP_S_SHIFT) |
1219 (r300_translate_wrap(sampler->state.wrap_t) << R300_TX_WRAP_T_SHIFT) |
1220 (r300_translate_wrap(sampler->state.wrap_r) << R300_TX_WRAP_R_SHIFT);
1221
1222 sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter,
1223 state->mag_img_filter,
1224 state->min_mip_filter,
1225 state->max_anisotropy > 0);
1226
1227 sampler->filter0 |= r300_anisotropy(state->max_anisotropy);
1228
1229 /* Unfortunately, r300-r500 don't support floating-point mipmap lods. */
1230 /* We must pass these to the merge function to clamp them properly. */
1231 sampler->min_lod = (unsigned)MAX2(state->min_lod, 0);
1232 sampler->max_lod = (unsigned)MAX2(ceilf(state->max_lod), 0);
1233
1234 lod_bias = CLAMP((int)(state->lod_bias * 32 + 1), -(1 << 9), (1 << 9) - 1);
1235
1236 sampler->filter1 |= (lod_bias << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
1237
1238 /* This is very high quality anisotropic filtering for R5xx.
1239 * It's good for benchmarking the performance of texturing but
1240 * in practice we don't want to slow down the driver because it's
1241 * a pretty good performance killer. Feel free to play with it. */
1242 if (DBG_ON(r300, DBG_ANISOHQ) && is_r500) {
1243 sampler->filter1 |= r500_anisotropy(state->max_anisotropy);
1244 }
1245
1246 /* R500-specific fixups and optimizations */
1247 if (r300->screen->caps.is_r500) {
1248 sampler->filter1 |= R500_BORDER_FIX;
1249 }
1250
1251 return (void*)sampler;
1252 }
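/* LOD bias encoding example: the bias is stored as a signed fixed-point
 * value with 5 fractional bits, so a pipe lod_bias of 0.5 becomes
 * (int)(0.5 * 32 + 1) = 17, clamped to [-512, 511] and then masked/shifted
 * by R300_LOD_BIAS_SHIFT into filter1. */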
1253
1254 static void r300_bind_sampler_states(struct pipe_context* pipe,
1255 unsigned count,
1256 void** states)
1257 {
1258 struct r300_context* r300 = r300_context(pipe);
1259 struct r300_textures_state* state =
1260 (struct r300_textures_state*)r300->textures_state.state;
1261 unsigned tex_units = r300->screen->caps.num_tex_units;
1262
1263 if (count > tex_units) {
1264 return;
1265 }
1266
1267 memcpy(state->sampler_states, states, sizeof(void*) * count);
1268 state->sampler_state_count = count;
1269
1270 r300_mark_atom_dirty(r300, &r300->textures_state);
1271 }
1272
1273 static void r300_lacks_vertex_textures(struct pipe_context* pipe,
1274 unsigned count,
1275 void** states)
1276 {
1277 }
1278
1279 static void r300_delete_sampler_state(struct pipe_context* pipe, void* state)
1280 {
1281 FREE(state);
1282 }
1283
1284 static uint32_t r300_assign_texture_cache_region(unsigned index, unsigned num)
1285 {
1286 /* This looks like a hack, but I believe it's supposed to work like
1287 * that. To illustrate how this works, let's assume you have 5 textures.
1288 * From docs, 5 and the successive numbers are:
1289 *
1290 * FOURTH_1 = 5
1291 * FOURTH_2 = 6
1292 * FOURTH_3 = 7
1293 * EIGHTH_0 = 8
1294 * EIGHTH_1 = 9
1295 *
1296 * The first 3 textures will get 3/4 of the cache size, divided evenly
1297 * between them. The last 1/4 of the cache must be divided between
1298 * the last 2 textures, so each of them gets 1/8 of the cache.
1299 * Why not just use "5 + texture_index"?
1300 *
1301 * This simple trick works for all "num" <= 16.
1302 */
1303 if (num <= 1)
1304 return R300_TX_CACHE(R300_TX_CACHE_WHOLE);
1305 else
1306 return R300_TX_CACHE(num + index);
1307 }
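/* Following the comment above through for num == 5: the five views get
 * R300_TX_CACHE(5) .. R300_TX_CACHE(9), that is FOURTH_1, FOURTH_2,
 * FOURTH_3, EIGHTH_0 and EIGHTH_1. Three quarters plus two eighths cover
 * the texture cache exactly once. */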
1308
1309 static void r300_set_fragment_sampler_views(struct pipe_context* pipe,
1310 unsigned count,
1311 struct pipe_sampler_view** views)
1312 {
1313 struct r300_context* r300 = r300_context(pipe);
1314 struct r300_textures_state* state =
1315 (struct r300_textures_state*)r300->textures_state.state;
1316 struct r300_texture *texture;
1317 unsigned i, real_num_views = 0, view_index = 0;
1318 unsigned tex_units = r300->screen->caps.num_tex_units;
1319 boolean dirty_tex = FALSE;
1320
1321 if (count > tex_units) {
1322 return;
1323 }
1324
1325 /* Calculate the real number of views. */
1326 for (i = 0; i < count; i++) {
1327 if (views[i])
1328 real_num_views++;
1329 }
1330
1331 for (i = 0; i < count; i++) {
1332 pipe_sampler_view_reference(
1333 (struct pipe_sampler_view**)&state->sampler_views[i],
1334 views[i]);
1335
1336 if (!views[i]) {
1337 continue;
1338 }
1339
1340 /* A new sampler view (= texture)... */
1341 dirty_tex = TRUE;
1342
1343 /* Set the texrect factor in the fragment shader.
1344 * Needed for RECT and NPOT fallback. */
1345 texture = r300_texture(views[i]->texture);
1346 if (texture->desc.is_npot) {
1347 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1348 }
1349
1350 state->sampler_views[i]->texcache_region =
1351 r300_assign_texture_cache_region(view_index, real_num_views);
1352 view_index++;
1353 }
1354
1355 for (i = count; i < tex_units; i++) {
1356 if (state->sampler_views[i]) {
1357 pipe_sampler_view_reference(
1358 (struct pipe_sampler_view**)&state->sampler_views[i],
1359 NULL);
1360 }
1361 }
1362
1363 state->sampler_view_count = count;
1364
1365 r300_mark_atom_dirty(r300, &r300->textures_state);
1366 r300->validate_buffers = TRUE;
1367
1368 if (dirty_tex) {
1369 r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
1370 }
1371 }
1372
1373 static struct pipe_sampler_view *
1374 r300_create_sampler_view(struct pipe_context *pipe,
1375 struct pipe_resource *texture,
1376 const struct pipe_sampler_view *templ)
1377 {
1378 struct r300_sampler_view *view = CALLOC_STRUCT(r300_sampler_view);
1379 struct r300_texture *tex = r300_texture(texture);
1380 boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500;
1381 boolean dxtc_swizzle = r300_screen(pipe->screen)->caps.dxtc_swizzle;
1382
1383 if (view) {
1384 view->base = *templ;
1385 view->base.reference.count = 1;
1386 view->base.context = pipe;
1387 view->base.texture = NULL;
1388 pipe_resource_reference(&view->base.texture, texture);
1389
1390 view->swizzle[0] = templ->swizzle_r;
1391 view->swizzle[1] = templ->swizzle_g;
1392 view->swizzle[2] = templ->swizzle_b;
1393 view->swizzle[3] = templ->swizzle_a;
1394
1395 view->format = tex->tx_format;
1396 view->format.format1 |= r300_translate_texformat(templ->format,
1397 view->swizzle,
1398 is_r500,
1399 dxtc_swizzle);
1400 if (is_r500) {
1401 view->format.format2 |= r500_tx_format_msb_bit(templ->format);
1402 }
1403 }
1404
1405 return (struct pipe_sampler_view*)view;
1406 }
1407
1408 static void
1409 r300_sampler_view_destroy(struct pipe_context *pipe,
1410 struct pipe_sampler_view *view)
1411 {
1412 pipe_resource_reference(&view->texture, NULL);
1413 FREE(view);
1414 }
1415
1416 static void r300_set_scissor_state(struct pipe_context* pipe,
1417 const struct pipe_scissor_state* state)
1418 {
1419 struct r300_context* r300 = r300_context(pipe);
1420
1421 memcpy(r300->scissor_state.state, state,
1422 sizeof(struct pipe_scissor_state));
1423
1424 r300_mark_atom_dirty(r300, &r300->scissor_state);
1425 }
1426
1427 static void r300_set_viewport_state(struct pipe_context* pipe,
1428 const struct pipe_viewport_state* state)
1429 {
1430 struct r300_context* r300 = r300_context(pipe);
1431 struct r300_viewport_state* viewport =
1432 (struct r300_viewport_state*)r300->viewport_state.state;
1433
1434 r300->viewport = *state;
1435
1436 if (r300->draw) {
1437 draw_set_viewport_state(r300->draw, state);
1438 viewport->vte_control = R300_VTX_XY_FMT | R300_VTX_Z_FMT;
1439 return;
1440 }
1441
1442 /* Do the transform in HW. */
1443 viewport->vte_control = R300_VTX_W0_FMT;
1444
1445 if (state->scale[0] != 1.0f) {
1446 viewport->xscale = state->scale[0];
1447 viewport->vte_control |= R300_VPORT_X_SCALE_ENA;
1448 }
1449 if (state->scale[1] != 1.0f) {
1450 viewport->yscale = state->scale[1];
1451 viewport->vte_control |= R300_VPORT_Y_SCALE_ENA;
1452 }
1453 if (state->scale[2] != 1.0f) {
1454 viewport->zscale = state->scale[2];
1455 viewport->vte_control |= R300_VPORT_Z_SCALE_ENA;
1456 }
1457 if (state->translate[0] != 0.0f) {
1458 viewport->xoffset = state->translate[0];
1459 viewport->vte_control |= R300_VPORT_X_OFFSET_ENA;
1460 }
1461 if (state->translate[1] != 0.0f) {
1462 viewport->yoffset = state->translate[1];
1463 viewport->vte_control |= R300_VPORT_Y_OFFSET_ENA;
1464 }
1465 if (state->translate[2] != 0.0f) {
1466 viewport->zoffset = state->translate[2];
1467 viewport->vte_control |= R300_VPORT_Z_OFFSET_ENA;
1468 }
1469
1470 r300_mark_atom_dirty(r300, &r300->viewport_state);
1471 if (r300->fs.state && r300_fs(r300)->shader->inputs.wpos != ATTR_UNUSED) {
1472 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1473 }
1474 }
1475
1476 static void r300_set_vertex_buffers(struct pipe_context* pipe,
1477 unsigned count,
1478 const struct pipe_vertex_buffer* buffers)
1479 {
1480 struct r300_context* r300 = r300_context(pipe);
1481 const struct pipe_vertex_buffer *vbo;
1482 unsigned i, max_index = (1 << 24) - 1;
1483 boolean any_user_buffer = FALSE;
1484 boolean any_nonuser_buffer = FALSE;
1485 struct pipe_vertex_buffer dummy_vb = {0};
1486
1487 /* There must be at least one vertex buffer set, otherwise it locks up. */
1488 if (!count) {
1489 dummy_vb.buffer = r300->dummy_vb;
1490 buffers = &dummy_vb;
1491 count = 1;
1492 }
1493
1494 if (count == r300->vertex_buffer_count &&
1495 memcmp(r300->vertex_buffer, buffers,
1496 sizeof(struct pipe_vertex_buffer) * count) == 0) {
1497 return;
1498 }
1499
1500 if (r300->screen->caps.has_tcl) {
1501 /* HW TCL. */
1502 r300->incompatible_vb_layout = FALSE;
1503
1504 /* Check if the strides and offsets are aligned to the size of DWORD. */
1505 for (i = 0; i < count; i++) {
1506 if (buffers[i].buffer) {
1507 if (buffers[i].stride % 4 != 0 ||
1508 buffers[i].buffer_offset % 4 != 0) {
1509 r300->incompatible_vb_layout = TRUE;
1510 break;
1511 }
1512 }
1513 }
1514
1515 for (i = 0; i < count; i++) {
1516 vbo = &buffers[i];
1517
1518 /* Skip NULL buffers */
1519 if (!vbo->buffer) {
1520 continue;
1521 }
1522
1523 /* User buffers have no info about maximum index,
1524 * we will have to compute it in draw_vbo. */
1525 if (r300_is_user_buffer(vbo->buffer)) {
1526 any_user_buffer = TRUE;
1527 continue;
1528 }
1529 any_nonuser_buffer = TRUE;
1530
1531 /* A stride of zero means we will be fetching only the first
1532 * vertex, so max_index does not matter here. */
1533 if (!vbo->stride)
1534 continue;
1535
1536 /* Update the maximum index. */
1537 {
1538 unsigned vbo_max_index =
1539 (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride;
1540 max_index = MIN2(max_index, vbo_max_index);
1541 }
1542 }
1543
1544 r300->any_user_vbs = any_user_buffer;
1545 r300->vertex_buffer_max_index = max_index;
1546 r300->vertex_arrays_dirty = TRUE;
1547 if (any_nonuser_buffer)
1548 r300->validate_buffers = TRUE;
1549 if (!any_user_buffer)
1550 r300->upload_vb_validated = FALSE;
1551 } else {
1552 /* SW TCL. */
1553 draw_set_vertex_buffers(r300->draw, count, buffers);
1554 }
1555
1556 /* Common code. */
1557 for (i = 0; i < count; i++) {
1558 vbo = &buffers[i];
1559
1560 /* Reference our buffer. */
1561 pipe_resource_reference(&r300->vertex_buffer[i].buffer, vbo->buffer);
1562 if (vbo->buffer && r300_is_user_buffer(vbo->buffer)) {
1563 pipe_resource_reference(&r300->real_vertex_buffer[i], NULL);
1564 } else {
1565 pipe_resource_reference(&r300->real_vertex_buffer[i], vbo->buffer);
1566 }
1567 }
1568 for (; i < r300->real_vertex_buffer_count; i++) {
1569 /* Dereference any old buffers. */
1570 pipe_resource_reference(&r300->vertex_buffer[i].buffer, NULL);
1571 pipe_resource_reference(&r300->real_vertex_buffer[i], NULL);
1572 }
1573
1574 memcpy(r300->vertex_buffer, buffers,
1575 sizeof(struct pipe_vertex_buffer) * count);
1576
1577 r300->vertex_buffer_count = count;
1578 r300->real_vertex_buffer_count = count;
1579 }
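/* Max-index bookkeeping example (made-up numbers): a 4000-byte buffer bound
 * with buffer_offset = 16 and stride = 12 yields a per-buffer limit of
 *     (4000 - 16) / 12 = 332,
 * and vertex_buffer_max_index is the minimum of such limits over all bound
 * non-user buffers with a non-zero stride. */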
1580
1581 static void r300_set_index_buffer(struct pipe_context* pipe,
1582 const struct pipe_index_buffer *ib)
1583 {
1584 struct r300_context* r300 = r300_context(pipe);
1585
1586 if (ib && ib->buffer) {
1587 pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
1588 memcpy(&r300->index_buffer, ib, sizeof(r300->index_buffer));
1589
1590 if (r300->screen->caps.has_tcl &&
1591 !r300_is_user_buffer(ib->buffer)) {
1592 r300->validate_buffers = TRUE;
1593 r300->upload_ib_validated = FALSE;
1594 }
1595 }
1596 else {
1597 pipe_resource_reference(&r300->index_buffer.buffer, NULL);
1598 memset(&r300->index_buffer, 0, sizeof(r300->index_buffer));
1599 }
1600
1601 if (!r300->screen->caps.has_tcl) {
1602 draw_set_index_buffer(r300->draw, ib);
1603 }
1604 }
1605
1606 /* Initialize the PSC tables. */
1607 static void r300_vertex_psc(struct r300_vertex_element_state *velems)
1608 {
1609 struct r300_vertex_stream_state *vstream = &velems->vertex_stream;
1610 uint16_t type, swizzle;
1611 enum pipe_format format;
1612 unsigned i;
1613
1614 if (velems->count > 16) {
1615 fprintf(stderr, "r300: More than 16 vertex elements are not supported,"
1616 " requested %i, using 16.\n", velems->count);
1617 velems->count = 16;
1618 }
1619
1620 /* Vertex shaders have no semantics on their inputs,
1621 * so PSC should just route stuff based on the vertex elements,
1622 * and not on attrib information. */
1623 for (i = 0; i < velems->count; i++) {
1624 format = velems->hw_format[i];
1625
1626 type = r300_translate_vertex_data_type(format);
1627 if (type == R300_INVALID_FORMAT) {
1628 fprintf(stderr, "r300: Bad vertex format %s.\n",
1629 util_format_short_name(format));
1630 assert(0);
1631 abort();
1632 }
1633
1634 type |= i << R300_DST_VEC_LOC_SHIFT;
1635 swizzle = r300_translate_vertex_data_swizzle(format);
1636
1637 if (i & 1) {
1638 vstream->vap_prog_stream_cntl[i >> 1] |= type << 16;
1639 vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle << 16;
1640 } else {
1641 vstream->vap_prog_stream_cntl[i >> 1] |= type;
1642 vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle;
1643 }
1644 }
1645
1646 /* Set the last vector in the PSC. */
1647 if (i) {
1648 i -= 1;
1649 }
1650 vstream->vap_prog_stream_cntl[i >> 1] |=
1651 (R300_LAST_VEC << (i & 1 ? 16 : 0));
1652
1653 vstream->count = (i >> 1) + 1;
1654 }
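/* Packing example for the loop above: with 3 vertex elements, elements 0
 * and 1 share vap_prog_stream_cntl[0] (low and high 16 bits respectively),
 * element 2 occupies the low half of vap_prog_stream_cntl[1] and carries
 * R300_LAST_VEC, and vstream->count ends up as (2 >> 1) + 1 = 2. */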
1655
1656 #define FORMAT_REPLACE(what, withwhat) \
1657 case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break
1658
1659 static void* r300_create_vertex_elements_state(struct pipe_context* pipe,
1660 unsigned count,
1661 const struct pipe_vertex_element* attribs)
1662 {
1663 struct r300_vertex_element_state *velems;
1664 unsigned i;
1665 enum pipe_format *format;
1666 struct pipe_vertex_element dummy_attrib = {0};
1667
1668 /* R300 Programmable Stream Control (PSC) doesn't support 0 vertex elements. */
1669 if (!count) {
1670 dummy_attrib.src_format = PIPE_FORMAT_R8G8B8A8_UNORM;
1671 attribs = &dummy_attrib;
1672 count = 1;
1673 }
1674
1675 assert(count <= PIPE_MAX_ATTRIBS);
1676 velems = CALLOC_STRUCT(r300_vertex_element_state);
1677 if (velems != NULL) {
1678 velems->count = count;
1679 memcpy(velems->velem, attribs, sizeof(struct pipe_vertex_element) * count);
1680
1681 if (r300_screen(pipe->screen)->caps.has_tcl) {
1682 /* Set the best hw format in case the original format is not
1683 * supported by hw. */
1684 for (i = 0; i < count; i++) {
1685 velems->hw_format[i] = velems->velem[i].src_format;
1686 format = &velems->hw_format[i];
1687
1688 /* This is essentially the list of formats the hardware cannot
1689 * fetch directly. Alignment is not handled here; it is sorted
1690 * out after the PSC setup. */
1691 switch (*format) {
1692 FORMAT_REPLACE(R64_FLOAT, R32_FLOAT);
1693 FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT);
1694 FORMAT_REPLACE(R64G64B64_FLOAT, R32G32B32_FLOAT);
1695 FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT);
1696
1697 FORMAT_REPLACE(R32_UNORM, R32_FLOAT);
1698 FORMAT_REPLACE(R32G32_UNORM, R32G32_FLOAT);
1699 FORMAT_REPLACE(R32G32B32_UNORM, R32G32B32_FLOAT);
1700 FORMAT_REPLACE(R32G32B32A32_UNORM, R32G32B32A32_FLOAT);
1701
1702 FORMAT_REPLACE(R32_USCALED, R32_FLOAT);
1703 FORMAT_REPLACE(R32G32_USCALED, R32G32_FLOAT);
1704 FORMAT_REPLACE(R32G32B32_USCALED, R32G32B32_FLOAT);
1705 FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
1706
1707 FORMAT_REPLACE(R32_SNORM, R32_FLOAT);
1708 FORMAT_REPLACE(R32G32_SNORM, R32G32_FLOAT);
1709 FORMAT_REPLACE(R32G32B32_SNORM, R32G32B32_FLOAT);
1710 FORMAT_REPLACE(R32G32B32A32_SNORM, R32G32B32A32_FLOAT);
1711
1712 FORMAT_REPLACE(R32_SSCALED, R32_FLOAT);
1713 FORMAT_REPLACE(R32G32_SSCALED, R32G32_FLOAT);
1714 FORMAT_REPLACE(R32G32B32_SSCALED, R32G32B32_FLOAT);
1715 FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
1716
1717 FORMAT_REPLACE(R32_FIXED, R32_FLOAT);
1718 FORMAT_REPLACE(R32G32_FIXED, R32G32_FLOAT);
1719 FORMAT_REPLACE(R32G32B32_FIXED, R32G32B32_FLOAT);
1720 FORMAT_REPLACE(R32G32B32A32_FIXED, R32G32B32A32_FLOAT);
1721
1722 default:;
1723 }
1724
1725 velems->incompatible_layout =
1726 velems->incompatible_layout ||
1727 velems->velem[i].src_format != velems->hw_format[i] ||
1728 velems->velem[i].src_offset % 4 != 0;
1729 }
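/* At this point a replaced format or a src_offset that is not DWORD-aligned
 * has marked the layout as incompatible; presumably the draw path uses this
 * flag to fall back to translating and re-uploading the vertex data instead
 * of binding the buffers directly. */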
1730
1731 /* Now set up the PSC.
1732 * The unused components will be replaced by (..., 0, 1). */
1733 r300_vertex_psc(velems);
1734
1735 /* Align the formats to the size of DWORD.
1736 * We only care about the blocksizes of the formats since
1737 * swizzles are already set up.
1738 * Also compute the vertex size. */
1739 for (i = 0; i < count; i++) {
1740 /* This is OK because vertex strides are also checked
1741 * for alignment elsewhere. */
1742 velems->hw_format_size[i] =
1743 align(util_format_get_blocksize(velems->hw_format[i]), 4);
1744 velems->vertex_size_dwords += velems->hw_format_size[i] / 4;
1745 }
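/* For example, a format with a 2-byte blocksize is padded to 4 bytes here
 * (one DWORD), while R32G32B32_FLOAT keeps its 12 bytes and adds three
 * DWORDs to vertex_size_dwords. */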
1746 }
1747 }
1748 return velems;
1749 }
1750
1751 static void r300_bind_vertex_elements_state(struct pipe_context *pipe,
1752 void *state)
1753 {
1754 struct r300_context *r300 = r300_context(pipe);
1755 struct r300_vertex_element_state *velems = state;
1756
1757 if (velems == NULL) {
1758 return;
1759 }
1760
1761 r300->velems = velems;
1762
1763 if (r300->draw) {
1764 draw_set_vertex_elements(r300->draw, velems->count, velems->velem);
1765 return;
1766 }
1767
1768 UPDATE_STATE(&velems->vertex_stream, r300->vertex_stream_state);
1769 r300->vertex_stream_state.size = (1 + velems->vertex_stream.count) * 2;
1770 r300->vertex_arrays_dirty = TRUE;
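/* Sketch of the size computation above (an assumption about the CS layout):
 * with vertex_stream.count == 2, size = (1 + 2) * 2 = 6 dwords, presumably
 * one packet header plus two register values for each of the CNTL and
 * CNTL_EXT groups. */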
1771 }
1772
1773 static void r300_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
1774 {
1775 FREE(state);
1776 }
1777
1778 static void* r300_create_vs_state(struct pipe_context* pipe,
1779 const struct pipe_shader_state* shader)
1780 {
1781 struct r300_context* r300 = r300_context(pipe);
1782 struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);
1783
1784 /* Copy state directly into shader. */
1785 vs->state = *shader;
1786 vs->state.tokens = tgsi_dup_tokens(shader->tokens);
1787
1788 if (r300->screen->caps.has_tcl) {
1789 r300_init_vs_outputs(vs);
1790 r300_translate_vertex_shader(r300, vs);
1791 } else {
1792 r300_draw_init_vertex_shader(r300->draw, vs);
1793 }
1794
1795 return vs;
1796 }
1797
1798 static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
1799 {
1800 struct r300_context* r300 = r300_context(pipe);
1801 struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;
1802
1803 if (vs == NULL) {
1804 r300->vs_state.state = NULL;
1805 return;
1806 }
1807 if (vs == r300->vs_state.state) {
1808 return;
1809 }
1810 r300->vs_state.state = vs;
1811
1812 /* Most of the RS block bits depend on the vertex shader. */
1813 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before emission. */
1814
1815 if (r300->screen->caps.has_tcl) {
1816 unsigned fc_op_dwords = r300->screen->caps.is_r500 ? 3 : 2;
1817 r300_mark_atom_dirty(r300, &r300->vs_state);
1818 r300->vs_state.size =
1819 vs->code.length + 9 +
1820 (vs->code.num_fc_ops ? vs->code.num_fc_ops * fc_op_dwords + 4 : 0);
1821
1822 r300_mark_atom_dirty(r300, &r300->vs_constants);
1823 r300->vs_constants.size =
1824 2 +
1825 (vs->externals_count ? vs->externals_count * 4 + 3 : 0) +
1826 (vs->immediates_count ? vs->immediates_count * 4 + 3 : 0);
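/* Illustrative dword accounting for a hypothetical shader: on r500
 * (fc_op_dwords = 3), two flow-control ops add 2*3 + 4 = 10 dwords on top
 * of code.length + 9, and 4 externals with no immediates give
 * vs_constants.size = 2 + (4*4 + 3) = 21. */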
1827
1828 ((struct r300_constant_buffer*)r300->vs_constants.state)->remap_table =
1829 vs->code.constants_remap_table;
1830
1831 r300_mark_atom_dirty(r300, &r300->pvs_flush);
1832 } else {
1833 draw_bind_vertex_shader(r300->draw,
1834 (struct draw_vertex_shader*)vs->draw_vs);
1835 }
1836 }
1837
1838 static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
1839 {
1840 struct r300_context* r300 = r300_context(pipe);
1841 struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;
1842
1843 if (r300->screen->caps.has_tcl) {
1844 rc_constants_destroy(&vs->code.constants);
1845 if (vs->code.constants_remap_table)
1846 FREE(vs->code.constants_remap_table);
1847 } else {
1848 draw_delete_vertex_shader(r300->draw,
1849 (struct draw_vertex_shader*)vs->draw_vs);
1850 }
1851
1852 FREE((void*)vs->state.tokens);
1853 FREE(shader);
1854 }
1855
1856 static void r300_set_constant_buffer(struct pipe_context *pipe,
1857 uint shader, uint index,
1858 struct pipe_resource *buf)
1859 {
1860 struct r300_context* r300 = r300_context(pipe);
1861 struct r300_constant_buffer *cbuf;
1862 struct r300_buffer *rbuf = r300_buffer(buf);
1863 uint32_t *mapped;
1864
1865 switch (shader) {
1866 case PIPE_SHADER_VERTEX:
1867 cbuf = (struct r300_constant_buffer*)r300->vs_constants.state;
1868 break;
1869 case PIPE_SHADER_FRAGMENT:
1870 cbuf = (struct r300_constant_buffer*)r300->fs_constants.state;
1871 break;
1872 default:
1873 return;
1874 }
1875
1876 if (buf == NULL || buf->width0 == 0)
1877 return;
1878
1879 if (rbuf->user_buffer)
1880 mapped = (uint32_t*)rbuf->user_buffer;
1881 else if (rbuf->constant_buffer)
1882 mapped = (uint32_t*)rbuf->constant_buffer;
1883 else
1884 return;
1885
1886 if (shader == PIPE_SHADER_FRAGMENT ||
1887 (shader == PIPE_SHADER_VERTEX && r300->screen->caps.has_tcl)) {
1888 cbuf->ptr = mapped;
1889 }
1890
1891 if (shader == PIPE_SHADER_VERTEX) {
1892 if (r300->screen->caps.has_tcl) {
1893 struct r300_vertex_shader *vs =
1894 (struct r300_vertex_shader*)r300->vs_state.state;
1895
1896 if (!vs) {
1897 cbuf->buffer_base = 0;
1898 return;
1899 }
1900
1901 cbuf->buffer_base = r300->vs_const_base;
1902 r300->vs_const_base += vs->code.constants.Count;
1903 if (r300->vs_const_base > R500_MAX_PVS_CONST_VECS) {
1904 r300->vs_const_base = vs->code.constants.Count;
1905 cbuf->buffer_base = 0;
1906 r300_mark_atom_dirty(r300, &r300->pvs_flush);
1907 }
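/* Constant space is handed out linearly, e.g. binding constants for two
 * shaders with 32 and 20 constant vectors yields buffer_base 0 and then 32.
 * Once the running base would exceed R500_MAX_PVS_CONST_VECS, allocation
 * restarts from 0 and the PVS is flushed, presumably so constants still in
 * use by in-flight shaders are not clobbered. */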
1908 r300_mark_atom_dirty(r300, &r300->vs_constants);
1909 } else if (r300->draw) {
1910 draw_set_mapped_constant_buffer(r300->draw, PIPE_SHADER_VERTEX,
1911 0, mapped, buf->width0);
1912 }
1913 } else if (shader == PIPE_SHADER_FRAGMENT) {
1914 r300_mark_atom_dirty(r300, &r300->fs_constants);
1915 }
1916 }
1917
1918 void r300_init_state_functions(struct r300_context* r300)
1919 {
1920 r300->context.create_blend_state = r300_create_blend_state;
1921 r300->context.bind_blend_state = r300_bind_blend_state;
1922 r300->context.delete_blend_state = r300_delete_blend_state;
1923
1924 r300->context.set_blend_color = r300_set_blend_color;
1925
1926 r300->context.set_clip_state = r300_set_clip_state;
1927 r300->context.set_sample_mask = r300_set_sample_mask;
1928
1929 r300->context.set_constant_buffer = r300_set_constant_buffer;
1930
1931 r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
1932 r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
1933 r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;
1934
1935 r300->context.set_stencil_ref = r300_set_stencil_ref;
1936
1937 r300->context.set_framebuffer_state = r300_set_framebuffer_state;
1938
1939 r300->context.create_fs_state = r300_create_fs_state;
1940 r300->context.bind_fs_state = r300_bind_fs_state;
1941 r300->context.delete_fs_state = r300_delete_fs_state;
1942
1943 r300->context.set_polygon_stipple = r300_set_polygon_stipple;
1944
1945 r300->context.create_rasterizer_state = r300_create_rs_state;
1946 r300->context.bind_rasterizer_state = r300_bind_rs_state;
1947 r300->context.delete_rasterizer_state = r300_delete_rs_state;
1948
1949 r300->context.create_sampler_state = r300_create_sampler_state;
1950 r300->context.bind_fragment_sampler_states = r300_bind_sampler_states;
1951 r300->context.bind_vertex_sampler_states = r300_lacks_vertex_textures;
1952 r300->context.delete_sampler_state = r300_delete_sampler_state;
1953
1954 r300->context.set_fragment_sampler_views = r300_set_fragment_sampler_views;
1955 r300->context.create_sampler_view = r300_create_sampler_view;
1956 r300->context.sampler_view_destroy = r300_sampler_view_destroy;
1957
1958 r300->context.set_scissor_state = r300_set_scissor_state;
1959
1960 r300->context.set_viewport_state = r300_set_viewport_state;
1961
1962 r300->context.set_vertex_buffers = r300_set_vertex_buffers;
1963 r300->context.set_index_buffer = r300_set_index_buffer;
1964
1965 r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
1966 r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
1967 r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;
1968
1969 r300->context.create_vs_state = r300_create_vs_state;
1970 r300->context.bind_vs_state = r300_bind_vs_state;
1971 r300->context.delete_vs_state = r300_delete_vs_state;
1972 }