r300g: implement MSAA
[mesa.git] / src/gallium/drivers/r300/r300_state.c
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24 #include "draw/draw_context.h"
25
26 #include "util/u_framebuffer.h"
27 #include "util/u_half.h"
28 #include "util/u_helpers.h"
29 #include "util/u_math.h"
30 #include "util/u_mm.h"
31 #include "util/u_memory.h"
32 #include "util/u_pack_color.h"
33 #include "util/u_transfer.h"
34
35 #include "tgsi/tgsi_parse.h"
36
37 #include "pipe/p_config.h"
38
39 #include "r300_cb.h"
40 #include "r300_context.h"
41 #include "r300_emit.h"
42 #include "r300_reg.h"
43 #include "r300_screen.h"
44 #include "r300_screen_buffer.h"
45 #include "r300_state_inlines.h"
46 #include "r300_fs.h"
47 #include "r300_texture.h"
48 #include "r300_vs.h"
49
50 /* r300_state: Functions used to initialize the state context by translating
51 * Gallium state objects into semi-native r300 state objects. */
52
53 #define UPDATE_STATE(cso, atom) \
54 if (cso != atom.state) { \
55 atom.state = cso; \
56 r300_mark_atom_dirty(r300, &(atom)); \
57 }
58
59 static boolean blend_discard_if_src_alpha_0(unsigned srcRGB, unsigned srcA,
60 unsigned dstRGB, unsigned dstA)
61 {
62 /* If the blend equation is ADD or REVERSE_SUBTRACT,
63 * SRC_ALPHA == 0, and the following state is set, the colorbuffer
64 * will not be changed.
65 * Notice that the dst factors are the src factors inverted. */
66 return (srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
67 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
68 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
69 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
70 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
71 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
72 srcA == PIPE_BLENDFACTOR_ZERO) &&
73 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
74 dstRGB == PIPE_BLENDFACTOR_ONE) &&
75 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
76 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
77 dstA == PIPE_BLENDFACTOR_ONE);
78 }
79
80 static boolean blend_discard_if_src_alpha_1(unsigned srcRGB, unsigned srcA,
81 unsigned dstRGB, unsigned dstA)
82 {
83 /* If the blend equation is ADD or REVERSE_SUBTRACT,
84 * SRC_ALPHA == 1, and the following state is set, the colorbuffer
85 * will not be changed.
86 * Notice that the dst factors are the src factors inverted. */
87 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
88 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
89 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
90 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
91 srcA == PIPE_BLENDFACTOR_ZERO) &&
92 (dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
93 dstRGB == PIPE_BLENDFACTOR_ONE) &&
94 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
95 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
96 dstA == PIPE_BLENDFACTOR_ONE);
97 }
98
99 static boolean blend_discard_if_src_color_0(unsigned srcRGB, unsigned srcA,
100 unsigned dstRGB, unsigned dstA)
101 {
102 /* If the blend equation is ADD or REVERSE_SUBTRACT,
103 * SRC_COLOR == (0,0,0), and the following state is set, the colorbuffer
104 * will not be changed.
105 * Notice that the dst factors are the src factors inverted. */
106 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
107 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
108 (srcA == PIPE_BLENDFACTOR_ZERO) &&
109 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
110 dstRGB == PIPE_BLENDFACTOR_ONE) &&
111 (dstA == PIPE_BLENDFACTOR_ONE);
112 }
113
114 static boolean blend_discard_if_src_color_1(unsigned srcRGB, unsigned srcA,
115 unsigned dstRGB, unsigned dstA)
116 {
117 /* If the blend equation is ADD or REVERSE_SUBTRACT,
118 * SRC_COLOR == (1,1,1), and the following state is set, the colorbuffer
119 * will not be changed.
120 * Notice that the dst factors are the src factors inverted. */
121 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
122 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
123 (srcA == PIPE_BLENDFACTOR_ZERO) &&
124 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
125 dstRGB == PIPE_BLENDFACTOR_ONE) &&
126 (dstA == PIPE_BLENDFACTOR_ONE);
127 }
128
129 static boolean blend_discard_if_src_alpha_color_0(unsigned srcRGB, unsigned srcA,
130 unsigned dstRGB, unsigned dstA)
131 {
132 /* If the blend equation is ADD or REVERSE_SUBTRACT,
133 * SRC_ALPHA_COLOR == (0,0,0,0), and the following state is set,
134 * the colorbuffer will not be changed.
135 * Notice that the dst factors are the src factors inverted. */
136 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
137 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
138 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
139 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
140 (srcA == PIPE_BLENDFACTOR_SRC_COLOR ||
141 srcA == PIPE_BLENDFACTOR_SRC_ALPHA ||
142 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
143 srcA == PIPE_BLENDFACTOR_ZERO) &&
144 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
145 dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
146 dstRGB == PIPE_BLENDFACTOR_ONE) &&
147 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
148 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
149 dstA == PIPE_BLENDFACTOR_ONE);
150 }
151
152 static boolean blend_discard_if_src_alpha_color_1(unsigned srcRGB, unsigned srcA,
153 unsigned dstRGB, unsigned dstA)
154 {
155 /* If the blend equation is ADD or REVERSE_SUBTRACT,
156 * SRC_ALPHA_COLOR == (1,1,1,1), and the following state is set,
157 * the colorbuffer will not be changed.
158 * Notice that the dst factors are the src factors inverted. */
159 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
160 srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
161 srcRGB == PIPE_BLENDFACTOR_ZERO) &&
162 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
163 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
164 srcA == PIPE_BLENDFACTOR_ZERO) &&
165 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR ||
166 dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
167 dstRGB == PIPE_BLENDFACTOR_ONE) &&
168 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
169 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
170 dstA == PIPE_BLENDFACTOR_ONE);
171 }
172
173 /* The hardware colormask is clunky and must be swizzled depending on the format.
174 * This was figured out by trial-and-error. */
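/* Worked example (gallium defines PIPE_MASK_R/G/B/A as 0x1/0x2/0x4/0x8):
 * for a BGRA colorbuffer, R and B trade places in the hardware mask while
 * G and A pass through unchanged, so
 * bgra_cmask(PIPE_MASK_R | PIPE_MASK_G) == 0x6. */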
175 static unsigned bgra_cmask(unsigned mask)
176 {
177 return ((mask & PIPE_MASK_R) << 2) |
178 ((mask & PIPE_MASK_B) >> 2) |
179 (mask & (PIPE_MASK_G | PIPE_MASK_A));
180 }
181
182 static unsigned rgba_cmask(unsigned mask)
183 {
184 return mask & PIPE_MASK_RGBA;
185 }
186
187 static unsigned rrrr_cmask(unsigned mask)
188 {
189 return (mask & PIPE_MASK_R) |
190 ((mask & PIPE_MASK_R) << 1) |
191 ((mask & PIPE_MASK_R) << 2) |
192 ((mask & PIPE_MASK_R) << 3);
193 }
194
195 static unsigned aaaa_cmask(unsigned mask)
196 {
197 return ((mask & PIPE_MASK_A) >> 3) |
198 ((mask & PIPE_MASK_A) >> 2) |
199 ((mask & PIPE_MASK_A) >> 1) |
200 (mask & PIPE_MASK_A);
201 }
202
203 static unsigned grrg_cmask(unsigned mask)
204 {
205 return ((mask & PIPE_MASK_R) << 1) |
206 ((mask & PIPE_MASK_R) << 2) |
207 ((mask & PIPE_MASK_G) >> 1) |
208 ((mask & PIPE_MASK_G) << 2);
209 }
210
211 static unsigned arra_cmask(unsigned mask)
212 {
213 return ((mask & PIPE_MASK_R) << 1) |
214 ((mask & PIPE_MASK_R) << 2) |
215 ((mask & PIPE_MASK_A) >> 3) |
216 (mask & PIPE_MASK_A);
217 }
218
219 /* Create a new blend state based on the CSO blend state.
220 *
221 * This encompasses alpha blending, logic/raster ops, and blend dithering. */
222 static void* r300_create_blend_state(struct pipe_context* pipe,
223 const struct pipe_blend_state* state)
224 {
225 struct r300_screen* r300screen = r300_screen(pipe->screen);
226 struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state);
227 uint32_t blend_control = 0; /* R300_RB3D_CBLEND: 0x4e04 */
228 uint32_t blend_control_noclamp = 0; /* R300_RB3D_CBLEND: 0x4e04 */
229 uint32_t alpha_blend_control = 0; /* R300_RB3D_ABLEND: 0x4e08 */
230 uint32_t alpha_blend_control_noclamp = 0; /* R300_RB3D_ABLEND: 0x4e08 */
231 uint32_t rop = 0; /* R300_RB3D_ROPCNTL: 0x4e18 */
232 uint32_t dither = 0; /* R300_RB3D_DITHER_CTL: 0x4e50 */
233 int i;
234 CB_LOCALS;
235
236 blend->state = *state;
237
238 if (state->rt[0].blend_enable)
239 {
240 unsigned eqRGB = state->rt[0].rgb_func;
241 unsigned srcRGB = state->rt[0].rgb_src_factor;
242 unsigned dstRGB = state->rt[0].rgb_dst_factor;
243
244 unsigned eqA = state->rt[0].alpha_func;
245 unsigned srcA = state->rt[0].alpha_src_factor;
246 unsigned dstA = state->rt[0].alpha_dst_factor;
247
248 /* Despite the name, ALPHA_BLEND_ENABLE has nothing to do with alpha;
249 * it is just the crappy D3D naming. */
250 blend_control = blend_control_noclamp =
251 R300_ALPHA_BLEND_ENABLE |
252 ( r300_translate_blend_factor(srcRGB) << R300_SRC_BLEND_SHIFT) |
253 ( r300_translate_blend_factor(dstRGB) << R300_DST_BLEND_SHIFT);
254 blend_control |=
255 r300_translate_blend_function(eqRGB, TRUE);
256 blend_control_noclamp |=
257 r300_translate_blend_function(eqRGB, FALSE);
258
259 /* Optimization: some operations do not require the destination color.
260 *
261 * When SRC_ALPHA_SATURATE is used, colorbuffer reads must be enabled,
262 * otherwise blending gives incorrect results. It seems to be
263 * a hardware bug. */
264 if (eqRGB == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MIN ||
265 eqRGB == PIPE_BLEND_MAX || eqA == PIPE_BLEND_MAX ||
266 dstRGB != PIPE_BLENDFACTOR_ZERO ||
267 dstA != PIPE_BLENDFACTOR_ZERO ||
268 srcRGB == PIPE_BLENDFACTOR_DST_COLOR ||
269 srcRGB == PIPE_BLENDFACTOR_DST_ALPHA ||
270 srcRGB == PIPE_BLENDFACTOR_INV_DST_COLOR ||
271 srcRGB == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
272 srcA == PIPE_BLENDFACTOR_DST_COLOR ||
273 srcA == PIPE_BLENDFACTOR_DST_ALPHA ||
274 srcA == PIPE_BLENDFACTOR_INV_DST_COLOR ||
275 srcA == PIPE_BLENDFACTOR_INV_DST_ALPHA ||
276 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE) {
277 /* Enable reading from the colorbuffer. */
278 blend_control |= R300_READ_ENABLE;
279 blend_control_noclamp |= R300_READ_ENABLE;
280
281 if (r300screen->caps.is_r500) {
282 /* Optimization: Depending on incoming pixels, we can
283 * conditionally disable the reading in hardware... */
284 if (eqRGB != PIPE_BLEND_MIN && eqA != PIPE_BLEND_MIN &&
285 eqRGB != PIPE_BLEND_MAX && eqA != PIPE_BLEND_MAX) {
286 /* Disable reading if SRC_ALPHA == 0. */
287 if ((dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA ||
288 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
289 (dstA == PIPE_BLENDFACTOR_SRC_COLOR ||
290 dstA == PIPE_BLENDFACTOR_SRC_ALPHA ||
291 dstA == PIPE_BLENDFACTOR_ZERO) &&
292 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
293 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
294 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
295 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
296 blend_control |= R500_SRC_ALPHA_0_NO_READ;
297 }
298
299 /* Disable reading if SRC_ALPHA == 1. */
300 if ((dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
301 dstRGB == PIPE_BLENDFACTOR_ZERO) &&
302 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR ||
303 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA ||
304 dstA == PIPE_BLENDFACTOR_ZERO) &&
305 (srcRGB != PIPE_BLENDFACTOR_DST_COLOR &&
306 srcRGB != PIPE_BLENDFACTOR_DST_ALPHA &&
307 srcRGB != PIPE_BLENDFACTOR_INV_DST_COLOR &&
308 srcRGB != PIPE_BLENDFACTOR_INV_DST_ALPHA)) {
309 blend_control |= R500_SRC_ALPHA_1_NO_READ;
310 }
311 }
312 }
313 }
314
315 /* Optimization: discard pixels which don't change the colorbuffer.
316 *
317 * The code below is non-trivial and some math is involved.
318 *
319 * Discarding pixels must be disabled when FP16 AA is enabled.
320 * This is a hardware bug. Also, this implementation wouldn't work
321 * with FP blending enabled and equation clamping disabled.
322 *
323 * Equations other than ADD are rarely used and therefore won't be
324 * optimized. */
325 if ((eqRGB == PIPE_BLEND_ADD || eqRGB == PIPE_BLEND_REVERSE_SUBTRACT) &&
326 (eqA == PIPE_BLEND_ADD || eqA == PIPE_BLEND_REVERSE_SUBTRACT)) {
327 /* ADD: X+Y
328 * REVERSE_SUBTRACT: Y-X
329 *
330 * The idea is:
331 * If X = src*srcFactor = 0 and Y = dst*dstFactor = 1,
332 * then CB will not be changed.
333 *
334 * Given the srcFactor and dstFactor variables, we can derive
335 * what src and dst should be equal to and discard appropriate
336 * pixels.
337 */
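/* For instance, classic alpha blending (srcRGB/srcA = SRC_ALPHA,
 * dstRGB/dstA = INV_SRC_ALPHA) matches blend_discard_if_src_alpha_0:
 * a fragment with alpha == 0 contributes src*0 + dst*1 = dst, so it can
 * be discarded without changing the colorbuffer. */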
338 if (blend_discard_if_src_alpha_0(srcRGB, srcA, dstRGB, dstA)) {
339 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0;
340 } else if (blend_discard_if_src_alpha_1(srcRGB, srcA,
341 dstRGB, dstA)) {
342 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1;
343 } else if (blend_discard_if_src_color_0(srcRGB, srcA,
344 dstRGB, dstA)) {
345 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_0;
346 } else if (blend_discard_if_src_color_1(srcRGB, srcA,
347 dstRGB, dstA)) {
348 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_1;
349 } else if (blend_discard_if_src_alpha_color_0(srcRGB, srcA,
350 dstRGB, dstA)) {
351 blend_control |=
352 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0;
353 } else if (blend_discard_if_src_alpha_color_1(srcRGB, srcA,
354 dstRGB, dstA)) {
355 blend_control |=
356 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1;
357 }
358 }
359
360 /* separate alpha */
361 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
362 blend_control |= R300_SEPARATE_ALPHA_ENABLE;
363 blend_control_noclamp |= R300_SEPARATE_ALPHA_ENABLE;
364 alpha_blend_control = alpha_blend_control_noclamp =
365 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) |
366 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT);
367 alpha_blend_control |=
368 r300_translate_blend_function(eqA, TRUE);
369 alpha_blend_control_noclamp |=
370 r300_translate_blend_function(eqA, FALSE);
371 }
372 }
373
374 /* PIPE_LOGICOP_* don't need to be translated, fortunately. */
375 if (state->logicop_enable) {
376 rop = R300_RB3D_ROPCNTL_ROP_ENABLE |
377 (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT;
378 }
379
380 /* Neither fglrx nor classic r300 ever set this, regardless of dithering
381 * state. Since it's an optional implementation detail, we can leave it
382 * out and never dither.
383 *
384 * This could be revisited if we ever get quality or conformance hints.
385 *
386 if (state->dither) {
387 dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT |
388 R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT;
389 }
390 */
391
392 /* Build a command buffer. */
393 {
394 unsigned (*func[COLORMASK_NUM_SWIZZLES])(unsigned) = {
395 bgra_cmask,
396 rgba_cmask,
397 rrrr_cmask,
398 aaaa_cmask,
399 grrg_cmask,
400 arra_cmask
401 };
402
403 for (i = 0; i < COLORMASK_NUM_SWIZZLES; i++) {
404 BEGIN_CB(blend->cb_clamp[i], 8);
405 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
406 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
407 OUT_CB(blend_control);
408 OUT_CB(alpha_blend_control);
409 OUT_CB(func[i](state->rt[0].colormask));
410 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
411 END_CB;
412 }
413 }
414
415 /* The same as above, but without blend equation clamping. */
416 BEGIN_CB(blend->cb_noclamp, 8);
417 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
418 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
419 OUT_CB(blend_control_noclamp);
420 OUT_CB(alpha_blend_control_noclamp);
421 OUT_CB(rgba_cmask(state->rt[0].colormask));
422 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
423 END_CB;
424
425 /* The same as above, but with no colorbuffer reads and writes. */
426 BEGIN_CB(blend->cb_no_readwrite, 8);
427 OUT_CB_REG(R300_RB3D_ROPCNTL, rop);
428 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3);
429 OUT_CB(0);
430 OUT_CB(0);
431 OUT_CB(0);
432 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither);
433 END_CB;
434
435 return (void*)blend;
436 }
437
438 /* Bind blend state. */
439 static void r300_bind_blend_state(struct pipe_context* pipe,
440 void* state)
441 {
442 struct r300_context* r300 = r300_context(pipe);
443 struct r300_blend_state *blend = (struct r300_blend_state*)state;
444 boolean last_alpha_to_one = r300->alpha_to_one;
445 boolean last_alpha_to_coverage = r300->alpha_to_coverage;
446
447 UPDATE_STATE(state, r300->blend_state);
448
449 if (!blend)
450 return;
451
452 r300->alpha_to_one = blend->state.alpha_to_one;
453 r300->alpha_to_coverage = blend->state.alpha_to_coverage;
454
455 if (r300->alpha_to_one != last_alpha_to_one && r300->msaa_enable &&
456 r300->fs_status == FRAGMENT_SHADER_VALID) {
457 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
458 }
459
460 if (r300->alpha_to_coverage != last_alpha_to_coverage &&
461 r300->msaa_enable) {
462 r300_mark_atom_dirty(r300, &r300->dsa_state);
463 }
464 }
465
466 /* Free blend state. */
467 static void r300_delete_blend_state(struct pipe_context* pipe,
468 void* state)
469 {
470 FREE(state);
471 }
472
473 /* Convert float to 10bit integer */
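/* For example, 1.0f maps to 1023 and 0.5f maps to 511 (0.5 * 1023.9 is
 * truncated to 511); the 1023.9f scale presumably biases the truncation
 * so that values just below 1.0 still land on 1023. */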
474 static unsigned float_to_fixed10(float f)
475 {
476 return CLAMP((unsigned)(f * 1023.9f), 0, 1023);
477 }
478
479 /* Set blend color.
480 * Setup both R300 and R500 registers, figure out later which one to write. */
481 static void r300_set_blend_color(struct pipe_context* pipe,
482 const struct pipe_blend_color* color)
483 {
484 struct r300_context* r300 = r300_context(pipe);
485 struct pipe_framebuffer_state *fb = r300->fb_state.state;
486 struct r300_blend_color_state *state =
487 (struct r300_blend_color_state*)r300->blend_color_state.state;
488 struct pipe_blend_color c;
489 enum pipe_format format = fb->nr_cbufs ? fb->cbufs[0]->format : 0;
490 CB_LOCALS;
491
492 state->state = *color; /* Save it, so that we can reuse it in set_fb_state */
493 c = *color;
494
495 /* The blend color is dependent on the colorbuffer format. */
496 if (fb->nr_cbufs) {
497 switch (format) {
498 case PIPE_FORMAT_R8_UNORM:
499 case PIPE_FORMAT_L8_UNORM:
500 case PIPE_FORMAT_I8_UNORM:
501 c.color[1] = c.color[0];
502 break;
503
504 case PIPE_FORMAT_A8_UNORM:
505 c.color[1] = c.color[3];
506 break;
507
508 case PIPE_FORMAT_R8G8_UNORM:
509 c.color[2] = c.color[1];
510 break;
511
512 case PIPE_FORMAT_L8A8_UNORM:
513 c.color[2] = c.color[3];
514 break;
515
516 default:;
517 }
518 }
519
520 if (r300->screen->caps.is_r500) {
521 BEGIN_CB(state->cb, 3);
522 OUT_CB_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
523
524 switch (format) {
525 case PIPE_FORMAT_R16G16B16A16_FLOAT:
526 OUT_CB(util_float_to_half(c.color[2]) |
527 (util_float_to_half(c.color[3]) << 16));
528 OUT_CB(util_float_to_half(c.color[0]) |
529 (util_float_to_half(c.color[1]) << 16));
530 break;
531
532 default:
533 OUT_CB(float_to_fixed10(c.color[0]) |
534 (float_to_fixed10(c.color[3]) << 16));
535 OUT_CB(float_to_fixed10(c.color[2]) |
536 (float_to_fixed10(c.color[1]) << 16));
537 }
538
539 END_CB;
540 } else {
541 union util_color uc;
542 util_pack_color(c.color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
543
544 BEGIN_CB(state->cb, 2);
545 OUT_CB_REG(R300_RB3D_BLEND_COLOR, uc.ui);
546 END_CB;
547 }
548
549 r300_mark_atom_dirty(r300, &r300->blend_color_state);
550 }
551
552 static void r300_set_clip_state(struct pipe_context* pipe,
553 const struct pipe_clip_state* state)
554 {
555 struct r300_context* r300 = r300_context(pipe);
556 struct r300_clip_state *clip =
557 (struct r300_clip_state*)r300->clip_state.state;
558 CB_LOCALS;
559
560 if (r300->screen->caps.has_tcl) {
561 BEGIN_CB(clip->cb, r300->clip_state.size);
562 OUT_CB_REG(R300_VAP_PVS_VECTOR_INDX_REG,
563 (r300->screen->caps.is_r500 ?
564 R500_PVS_UCP_START : R300_PVS_UCP_START));
565 OUT_CB_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
566 OUT_CB_TABLE(state->ucp, 6 * 4);
567 END_CB;
568
569 r300_mark_atom_dirty(r300, &r300->clip_state);
570 } else {
571 draw_set_clip_state(r300->draw, state);
572 }
573 }
574
575 /* Create a new depth, stencil, and alpha state based on the CSO dsa state.
576 *
577 * This contains the depth buffer, stencil buffer, alpha test, and such.
578 * On the Radeon, depth and stencil buffer setup are intertwined, which is
579 * the reason for some of the strange-looking assignments across registers. */
580 static void* r300_create_dsa_state(struct pipe_context* pipe,
581 const struct pipe_depth_stencil_alpha_state* state)
582 {
583 boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500;
584 struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state);
585 CB_LOCALS;
586 uint32_t alpha_value_fp16 = 0;
587 uint32_t z_buffer_control = 0;
588 uint32_t z_stencil_control = 0;
589 uint32_t stencil_ref_mask = 0;
590 uint32_t stencil_ref_bf = 0;
591
592 dsa->dsa = *state;
593
594 /* Depth test setup. The depth write mask is handled separately - needed for the decompression flush. */
595 if (state->depth.writemask) {
596 z_buffer_control |= R300_Z_WRITE_ENABLE;
597 }
598
599 if (state->depth.enabled) {
600 z_buffer_control |= R300_Z_ENABLE;
601
602 z_stencil_control |=
603 (r300_translate_depth_stencil_function(state->depth.func) <<
604 R300_Z_FUNC_SHIFT);
605 }
606
607 /* Stencil buffer setup. */
608 if (state->stencil[0].enabled) {
609 z_buffer_control |= R300_STENCIL_ENABLE;
610 z_stencil_control |=
611 (r300_translate_depth_stencil_function(state->stencil[0].func) <<
612 R300_S_FRONT_FUNC_SHIFT) |
613 (r300_translate_stencil_op(state->stencil[0].fail_op) <<
614 R300_S_FRONT_SFAIL_OP_SHIFT) |
615 (r300_translate_stencil_op(state->stencil[0].zpass_op) <<
616 R300_S_FRONT_ZPASS_OP_SHIFT) |
617 (r300_translate_stencil_op(state->stencil[0].zfail_op) <<
618 R300_S_FRONT_ZFAIL_OP_SHIFT);
619
620 stencil_ref_mask =
621 (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) |
622 (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT);
623
624 if (state->stencil[1].enabled) {
625 dsa->two_sided = TRUE;
626
627 z_buffer_control |= R300_STENCIL_FRONT_BACK;
628 z_stencil_control |=
629 (r300_translate_depth_stencil_function(state->stencil[1].func) <<
630 R300_S_BACK_FUNC_SHIFT) |
631 (r300_translate_stencil_op(state->stencil[1].fail_op) <<
632 R300_S_BACK_SFAIL_OP_SHIFT) |
633 (r300_translate_stencil_op(state->stencil[1].zpass_op) <<
634 R300_S_BACK_ZPASS_OP_SHIFT) |
635 (r300_translate_stencil_op(state->stencil[1].zfail_op) <<
636 R300_S_BACK_ZFAIL_OP_SHIFT);
637
638 stencil_ref_bf =
639 (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) |
640 (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT);
641
642 if (is_r500) {
643 z_buffer_control |= R500_STENCIL_REFMASK_FRONT_BACK;
644 } else {
645 dsa->two_sided_stencil_ref =
646 (state->stencil[0].valuemask != state->stencil[1].valuemask ||
647 state->stencil[0].writemask != state->stencil[1].writemask);
648 }
649 }
650 }
651
652 /* Alpha test setup. */
653 if (state->alpha.enabled) {
654 dsa->alpha_function =
655 r300_translate_alpha_function(state->alpha.func) |
656 R300_FG_ALPHA_FUNC_ENABLE;
657
658 dsa->alpha_function |= float_to_ubyte(state->alpha.ref_value);
659 alpha_value_fp16 = util_float_to_half(state->alpha.ref_value);
660 }
661
662 BEGIN_CB(&dsa->cb_begin, 8);
663 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
664 OUT_CB(z_buffer_control);
665 OUT_CB(z_stencil_control);
666 OUT_CB(stencil_ref_mask);
667 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, stencil_ref_bf);
668 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
669 END_CB;
670
671 BEGIN_CB(dsa->cb_zb_no_readwrite, 8);
672 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3);
673 OUT_CB(0);
674 OUT_CB(0);
675 OUT_CB(0);
676 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, 0);
677 OUT_CB_REG(R500_FG_ALPHA_VALUE, alpha_value_fp16);
678 END_CB;
679
680 return (void*)dsa;
681 }
682
683 static void r300_dsa_inject_stencilref(struct r300_context *r300)
684 {
685 struct r300_dsa_state *dsa =
686 (struct r300_dsa_state*)r300->dsa_state.state;
687
688 if (!dsa)
689 return;
690
691 dsa->stencil_ref_mask =
692 (dsa->stencil_ref_mask & ~R300_STENCILREF_MASK) |
693 r300->stencil_ref.ref_value[0];
694 dsa->stencil_ref_bf =
695 (dsa->stencil_ref_bf & ~R300_STENCILREF_MASK) |
696 r300->stencil_ref.ref_value[1];
697 }
698
699 /* Bind DSA state. */
700 static void r300_bind_dsa_state(struct pipe_context* pipe,
701 void* state)
702 {
703 struct r300_context* r300 = r300_context(pipe);
704
705 if (!state) {
706 return;
707 }
708
709 UPDATE_STATE(state, r300->dsa_state);
710
711 r300_mark_atom_dirty(r300, &r300->hyperz_state); /* Will be updated before the emission. */
712 r300_dsa_inject_stencilref(r300);
713 }
714
715 /* Free DSA state. */
716 static void r300_delete_dsa_state(struct pipe_context* pipe,
717 void* state)
718 {
719 FREE(state);
720 }
721
722 static void r300_set_stencil_ref(struct pipe_context* pipe,
723 const struct pipe_stencil_ref* sr)
724 {
725 struct r300_context* r300 = r300_context(pipe);
726
727 r300->stencil_ref = *sr;
728
729 r300_dsa_inject_stencilref(r300);
730 r300_mark_atom_dirty(r300, &r300->dsa_state);
731 }
732
733 static void r300_tex_set_tiling_flags(struct r300_context *r300,
734 struct r300_resource *tex,
735 unsigned level)
736 {
737 /* Check if the macrotile flag needs to be changed.
738 * Skip changing the flags otherwise. */
739 if (tex->tex.macrotile[tex->surface_level] !=
740 tex->tex.macrotile[level]) {
741 r300->rws->buffer_set_tiling(tex->buf, r300->cs,
742 tex->tex.microtile, tex->tex.macrotile[level],
743 0, 0, 0, 0, 0,
744 tex->tex.stride_in_bytes[0]);
745
746 tex->surface_level = level;
747 }
748 }
749
750 /* This switcheroo is needed just because of goddamned MACRO_SWITCH. */
751 static void r300_fb_set_tiling_flags(struct r300_context *r300,
752 const struct pipe_framebuffer_state *state)
753 {
754 unsigned i;
755
756 /* Set tiling flags for new surfaces. */
757 for (i = 0; i < state->nr_cbufs; i++) {
758 r300_tex_set_tiling_flags(r300,
759 r300_resource(state->cbufs[i]->texture),
760 state->cbufs[i]->u.tex.level);
761 }
762 if (state->zsbuf) {
763 r300_tex_set_tiling_flags(r300,
764 r300_resource(state->zsbuf->texture),
765 state->zsbuf->u.tex.level);
766 }
767 }
768
769 static void r300_print_fb_surf_info(struct pipe_surface *surf, unsigned index,
770 const char *binding)
771 {
772 struct pipe_resource *tex = surf->texture;
773 struct r300_resource *rtex = r300_resource(tex);
774
775 fprintf(stderr,
776 "r300: %s[%i] Dim: %ix%i, Firstlayer: %i, "
777 "Lastlayer: %i, Level: %i, Format: %s\n"
778
779 "r300: TEX: Macro: %s, Micro: %s, "
780 "Dim: %ix%ix%i, LastLevel: %i, Format: %s\n",
781
782 binding, index, surf->width, surf->height,
783 surf->u.tex.first_layer, surf->u.tex.last_layer, surf->u.tex.level,
784 util_format_short_name(surf->format),
785
786 rtex->tex.macrotile[0] ? "YES" : " NO",
787 rtex->tex.microtile ? "YES" : " NO",
788 tex->width0, tex->height0, tex->depth0,
789 tex->last_level, util_format_short_name(surf->format));
790 }
791
792 void r300_mark_fb_state_dirty(struct r300_context *r300,
793 enum r300_fb_state_change change)
794 {
795 struct pipe_framebuffer_state *state = r300->fb_state.state;
796
797 r300_mark_atom_dirty(r300, &r300->gpu_flush);
798 r300_mark_atom_dirty(r300, &r300->fb_state);
799
800 /* What is marked as dirty depends on the enum r300_fb_state_change. */
801 if (change == R300_CHANGED_FB_STATE) {
802 r300_mark_atom_dirty(r300, &r300->aa_state);
803 r300_mark_atom_dirty(r300, &r300->dsa_state); /* for AlphaRef */
804 r300_set_blend_color(&r300->context, r300->blend_color_state.state);
805 }
806
807 if (change == R300_CHANGED_FB_STATE ||
808 change == R300_CHANGED_HYPERZ_FLAG) {
809 r300_mark_atom_dirty(r300, &r300->hyperz_state);
810 }
811
812 if (change == R300_CHANGED_FB_STATE ||
813 change == R300_CHANGED_MULTIWRITE) {
814 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined);
815 }
816
817 /* Now compute the fb_state atom size. */
818 r300->fb_state.size = 2 + (8 * state->nr_cbufs);
819
820 if (r300->cbzb_clear)
821 r300->fb_state.size += 10;
822 else if (state->zsbuf) {
823 r300->fb_state.size += 10;
824 if (r300->hyperz_enabled)
825 r300->fb_state.size += 8;
826 }
827
828 /* The size of the rest of atoms stays the same. */
829 }
830
831 static unsigned r300_get_num_samples(struct r300_context *r300)
832 {
833 struct pipe_framebuffer_state* fb =
834 (struct pipe_framebuffer_state*)r300->fb_state.state;
835 unsigned num_samples;
836
837 if (fb->nr_cbufs)
838 num_samples = fb->cbufs[0]->texture->nr_samples;
839 else if (fb->zsbuf)
840 num_samples = fb->zsbuf->texture->nr_samples;
841 else
842 num_samples = 1;
843
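/* In gallium, nr_samples == 0 means the resource isn't multisampled,
 * so treat it as single-sampled. */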
844 if (!num_samples)
845 num_samples = 1;
846
847 return num_samples;
848 }
849
850 static void
851 r300_set_framebuffer_state(struct pipe_context* pipe,
852 const struct pipe_framebuffer_state* state)
853 {
854 struct r300_context* r300 = r300_context(pipe);
855 struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
856 struct pipe_framebuffer_state *old_state = r300->fb_state.state;
857 unsigned max_width, max_height, i;
858 uint32_t zbuffer_bpp = 0;
859 boolean unlock_zbuffer = FALSE;
860
861 if (r300->screen->caps.is_r500) {
862 max_width = max_height = 4096;
863 } else if (r300->screen->caps.is_r400) {
864 max_width = max_height = 4021;
865 } else {
866 max_width = max_height = 2560;
867 }
868
869 if (state->width > max_width || state->height > max_height) {
870 fprintf(stderr, "r300: Implementation error: Render targets are too "
871 "big in %s, refusing to bind framebuffer state!\n", __FUNCTION__);
872 return;
873 }
874
875 if (old_state->zsbuf && r300->zmask_in_use && !r300->locked_zbuffer) {
876 /* There is a zmask in use, what are we gonna do? */
877 if (state->zsbuf) {
878 if (!pipe_surface_equal(old_state->zsbuf, state->zsbuf)) {
879 /* Decompress the currently bound zbuffer before we bind another one. */
880 r300_decompress_zmask(r300);
881 r300->hiz_in_use = FALSE;
882 }
883 } else {
884 /* We don't bind another zbuffer, so lock the current one. */
885 pipe_surface_reference(&r300->locked_zbuffer, old_state->zsbuf);
886 }
887 } else if (r300->locked_zbuffer) {
888 /* We have a locked zbuffer now, what are we gonna do? */
889 if (state->zsbuf) {
890 if (!pipe_surface_equal(r300->locked_zbuffer, state->zsbuf)) {
891 /* We are binding some other zbuffer, so decompress the locked one,
892 * it gets unlocked automatically. */
893 r300_decompress_zmask_locked_unsafe(r300);
894 r300->hiz_in_use = FALSE;
895 } else {
896 /* We are binding the locked zbuffer again, so unlock it. */
897 unlock_zbuffer = TRUE;
898 }
899 }
900 }
901 assert(state->zsbuf || (r300->locked_zbuffer && !unlock_zbuffer) || !r300->zmask_in_use);
902
903 /* Need to reset clamping or colormask. */
904 r300_mark_atom_dirty(r300, &r300->blend_state);
905
906 /* If zsbuf is set from NULL to non-NULL or vice versa.. */
907 if (!!old_state->zsbuf != !!state->zsbuf) {
908 r300_mark_atom_dirty(r300, &r300->dsa_state);
909 }
910
911 if (r300->screen->info.drm_minor < 12) {
912 /* The tiling flags are dependent on the surface miplevel, unfortunately.
913 * This works around a bad design decision in old kernels, which
914 * rewrote the tile fields in registers. */
915 r300_fb_set_tiling_flags(r300, state);
916 }
917
918 util_copy_framebuffer_state(r300->fb_state.state, state);
919
920 if (unlock_zbuffer) {
921 pipe_surface_reference(&r300->locked_zbuffer, NULL);
922 }
923
924 r300_mark_fb_state_dirty(r300, R300_CHANGED_FB_STATE);
925
926 if (state->zsbuf) {
927 switch (util_format_get_blocksize(state->zsbuf->format)) {
928 case 2:
929 zbuffer_bpp = 16;
930 break;
931 case 4:
932 zbuffer_bpp = 24;
933 break;
934 }
935
936 /* Polygon offset depends on the zbuffer bit depth. */
937 if (r300->zbuffer_bpp != zbuffer_bpp) {
938 r300->zbuffer_bpp = zbuffer_bpp;
939
940 if (r300->polygon_offset_enabled)
941 r300_mark_atom_dirty(r300, &r300->rs_state);
942 }
943 }
944
945 r300->num_samples = r300_get_num_samples(r300);
946
947 /* Set up AA config. */
948 if (r300->num_samples > 1) {
949 switch (r300->num_samples) {
950 case 2:
951 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
952 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2;
953 break;
954 case 4:
955 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
956 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4;
957 break;
958 case 6:
959 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE |
960 R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6;
961 break;
962 }
963 } else {
964 aa->aa_config = 0;
965 }
966
967 if (DBG_ON(r300, DBG_FB)) {
968 fprintf(stderr, "r300: set_framebuffer_state:\n");
969 for (i = 0; i < state->nr_cbufs; i++) {
970 r300_print_fb_surf_info(state->cbufs[i], i, "CB");
971 }
972 if (state->zsbuf) {
973 r300_print_fb_surf_info(state->zsbuf, 0, "ZB");
974 }
975 }
976 }
977
978 /* Create fragment shader state. */
979 static void* r300_create_fs_state(struct pipe_context* pipe,
980 const struct pipe_shader_state* shader)
981 {
982 struct r300_fragment_shader* fs = NULL;
983
984 fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader);
985
986 /* Copy state directly into shader. */
987 fs->state = *shader;
988 fs->state.tokens = tgsi_dup_tokens(shader->tokens);
989
990 return (void*)fs;
991 }
992
993 void r300_mark_fs_code_dirty(struct r300_context *r300)
994 {
995 struct r300_fragment_shader* fs = r300_fs(r300);
996
997 r300_mark_atom_dirty(r300, &r300->fs);
998 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
999 r300_mark_atom_dirty(r300, &r300->fs_constants);
1000 r300->fs.size = fs->shader->cb_code_size;
1001
1002 if (r300->screen->caps.is_r500) {
1003 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 7;
1004 r300->fs_constants.size = fs->shader->externals_count * 4 + 3;
1005 } else {
1006 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 5;
1007 r300->fs_constants.size = fs->shader->externals_count * 4 + 1;
1008 }
1009
1010 ((struct r300_constant_buffer*)r300->fs_constants.state)->remap_table =
1011 fs->shader->code.constants_remap_table;
1012 }
1013
1014 /* Bind fragment shader state. */
1015 static void r300_bind_fs_state(struct pipe_context* pipe, void* shader)
1016 {
1017 struct r300_context* r300 = r300_context(pipe);
1018 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1019
1020 if (fs == NULL) {
1021 r300->fs.state = NULL;
1022 return;
1023 }
1024
1025 r300->fs.state = fs;
1026 r300->fs_status = FRAGMENT_SHADER_DIRTY;
1027
1028 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */
1029 }
1030
1031 /* Delete fragment shader state. */
1032 static void r300_delete_fs_state(struct pipe_context* pipe, void* shader)
1033 {
1034 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
1035 struct r300_fragment_shader_code *tmp, *ptr = fs->first;
1036
1037 while (ptr) {
1038 tmp = ptr;
1039 ptr = ptr->next;
1040 rc_constants_destroy(&tmp->code.constants);
1041 FREE(tmp->cb_code);
1042 FREE(tmp);
1043 }
1044 FREE((void*)fs->state.tokens);
1045 FREE(shader);
1046 }
1047
1048 static void r300_set_polygon_stipple(struct pipe_context* pipe,
1049 const struct pipe_poly_stipple* state)
1050 {
1051 /* XXX no idea how to set this up, but not terribly important */
1052 }
1053
1054 /* Create a new rasterizer state based on the CSO rasterizer state.
1055 *
1056 * This is a very large chunk of state, and covers most of the graphics
1057 * backend (GB), geometry assembly (GA), and setup unit (SU) blocks.
1058 *
1059 * In a not entirely unironic sidenote, this state has nearly nothing to do
1060 * with the actual block on the Radeon called the rasterizer (RS). */
1061 static void* r300_create_rs_state(struct pipe_context* pipe,
1062 const struct pipe_rasterizer_state* state)
1063 {
1064 struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state);
1065 uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */
1066 uint32_t vap_clip_cntl; /* R300_VAP_CLIP_CNTL: 0x221C */
1067 uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */
1068 uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */
1069 uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */
1070 uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */
1071 uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */
1072 uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */
1073 uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */
1074 uint32_t polygon_mode; /* R300_GA_POLY_MODE: 0x4288 */
1075 uint32_t clip_rule; /* R300_SC_CLIP_RULE: 0x43D0 */
1076 uint32_t round_mode; /* R300_GA_ROUND_MODE: 0x428c */
1077
1078 /* Point sprites texture coordinates, 0: lower left, 1: upper right */
1079 float point_texcoord_left = 0; /* R300_GA_POINT_S0: 0x4200 */
1080 float point_texcoord_bottom = 0;/* R300_GA_POINT_T0: 0x4204 */
1081 float point_texcoord_right = 1; /* R300_GA_POINT_S1: 0x4208 */
1082 float point_texcoord_top = 0; /* R300_GA_POINT_T1: 0x420c */
1083 boolean vclamp = state->clamp_vertex_color ||
1084 !r300_context(pipe)->screen->caps.is_r500;
1085 CB_LOCALS;
1086
1087 /* Copy rasterizer state. */
1088 rs->rs = *state;
1089 rs->rs_draw = *state;
1090
1091 rs->rs.sprite_coord_enable = state->point_quad_rasterization *
1092 state->sprite_coord_enable;
1093
1094 /* Override some states for Draw. */
1095 rs->rs_draw.sprite_coord_enable = 0; /* We can do this in HW. */
1096 rs->rs_draw.offset_point = 0;
1097 rs->rs_draw.offset_line = 0;
1098 rs->rs_draw.offset_tri = 0;
1099 rs->rs_draw.offset_clamp = 0;
1100
1101 #ifdef PIPE_ARCH_LITTLE_ENDIAN
1102 vap_control_status = R300_VC_NO_SWAP;
1103 #else
1104 vap_control_status = R300_VC_32BIT_SWAP;
1105 #endif
1106
1107 /* If no TCL engine is present, turn off the HW TCL. */
1108 if (!r300_screen(pipe->screen)->caps.has_tcl) {
1109 vap_control_status |= R300_VAP_TCL_BYPASS;
1110 }
1111
1112 /* Point size width and height. */
1113 point_size =
1114 pack_float_16_6x(state->point_size) |
1115 (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT);
1116
1117 /* Point size clamping. */
1118 if (state->point_size_per_vertex) {
1119 /* Per-vertex point size.
1120 * Clamp to [0, max FB size] */
1121 float min_psiz = util_get_min_point_size(state);
1122 float max_psiz = pipe->screen->get_paramf(pipe->screen,
1123 PIPE_CAPF_MAX_POINT_WIDTH);
1124 point_minmax =
1125 (pack_float_16_6x(min_psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1126 (pack_float_16_6x(max_psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1127 } else {
1128 /* We cannot disable the point-size vertex output,
1129 * so clamp it. */
1130 float psiz = state->point_size;
1131 point_minmax =
1132 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) |
1133 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT);
1134 }
1135
1136 /* Line control. */
1137 line_control = pack_float_16_6x(state->line_width) |
1138 R300_GA_LINE_CNTL_END_TYPE_COMP;
1139
1140 /* Enable polygon mode */
1141 polygon_mode = 0;
1142 if (state->fill_front != PIPE_POLYGON_MODE_FILL ||
1143 state->fill_back != PIPE_POLYGON_MODE_FILL) {
1144 polygon_mode = R300_GA_POLY_MODE_DUAL;
1145 }
1146
1147 /* Front face */
1148 if (state->front_ccw)
1149 cull_mode = R300_FRONT_FACE_CCW;
1150 else
1151 cull_mode = R300_FRONT_FACE_CW;
1152
1153 /* Polygon offset */
1154 polygon_offset_enable = 0;
1155 if (util_get_offset(state, state->fill_front)) {
1156 polygon_offset_enable |= R300_FRONT_ENABLE;
1157 }
1158 if (util_get_offset(state, state->fill_back)) {
1159 polygon_offset_enable |= R300_BACK_ENABLE;
1160 }
1161
1162 rs->polygon_offset_enable = polygon_offset_enable != 0;
1163
1164 /* Polygon mode */
1165 if (polygon_mode) {
1166 polygon_mode |=
1167 r300_translate_polygon_mode_front(state->fill_front);
1168 polygon_mode |=
1169 r300_translate_polygon_mode_back(state->fill_back);
1170 }
1171
1172 if (state->cull_face & PIPE_FACE_FRONT) {
1173 cull_mode |= R300_CULL_FRONT;
1174 }
1175 if (state->cull_face & PIPE_FACE_BACK) {
1176 cull_mode |= R300_CULL_BACK;
1177 }
1178
1179 if (state->line_stipple_enable) {
1180 line_stipple_config =
1181 R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE |
1182 (fui((float)state->line_stipple_factor) &
1183 R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK);
1184 /* XXX this might need to be scaled up */
1185 line_stipple_value = state->line_stipple_pattern;
1186 } else {
1187 line_stipple_config = 0;
1188 line_stipple_value = 0;
1189 }
1190
1191 if (state->flatshade) {
1192 rs->color_control = R300_SHADE_MODEL_FLAT;
1193 } else {
1194 rs->color_control = R300_SHADE_MODEL_SMOOTH;
1195 }
1196
1197 clip_rule = state->scissor ? 0xAAAA : 0xFFFF;
1198
1199 /* Point sprites coord mode */
1200 if (rs->rs.sprite_coord_enable) {
1201 switch (state->sprite_coord_mode) {
1202 case PIPE_SPRITE_COORD_UPPER_LEFT:
1203 point_texcoord_top = 0.0f;
1204 point_texcoord_bottom = 1.0f;
1205 break;
1206 case PIPE_SPRITE_COORD_LOWER_LEFT:
1207 point_texcoord_top = 1.0f;
1208 point_texcoord_bottom = 0.0f;
1209 break;
1210 }
1211 }
1212
1213 if (r300_screen(pipe->screen)->caps.has_tcl) {
1214 vap_clip_cntl = (state->clip_plane_enable & 63) |
1215 R300_PS_UCP_MODE_CLIP_AS_TRIFAN |
1216 (state->depth_clip ? 0 : R300_CLIP_DISABLE);
1217 } else {
1218 vap_clip_cntl = R300_CLIP_DISABLE;
1219 }
1220
1221 /* Vertex color clamping. FP20 means no clamping. */
1222 round_mode =
1223 R300_GA_ROUND_MODE_GEOMETRY_ROUND_NEAREST |
1224 (!vclamp ? (R300_GA_ROUND_MODE_RGB_CLAMP_FP20 |
1225 R300_GA_ROUND_MODE_ALPHA_CLAMP_FP20) : 0);
1226
1227 /* Build the main command buffer. */
1228 BEGIN_CB(rs->cb_main, RS_STATE_MAIN_SIZE);
1229 OUT_CB_REG(R300_VAP_CNTL_STATUS, vap_control_status);
1230 OUT_CB_REG(R300_VAP_CLIP_CNTL, vap_clip_cntl);
1231 OUT_CB_REG(R300_GA_POINT_SIZE, point_size);
1232 OUT_CB_REG_SEQ(R300_GA_POINT_MINMAX, 2);
1233 OUT_CB(point_minmax);
1234 OUT_CB(line_control);
1235 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE, 2);
1236 OUT_CB(polygon_offset_enable);
1237 rs->cull_mode_index = 11;
1238 OUT_CB(cull_mode);
1239 OUT_CB_REG(R300_GA_LINE_STIPPLE_CONFIG, line_stipple_config);
1240 OUT_CB_REG(R300_GA_LINE_STIPPLE_VALUE, line_stipple_value);
1241 OUT_CB_REG(R300_GA_POLY_MODE, polygon_mode);
1242 OUT_CB_REG(R300_GA_ROUND_MODE, round_mode);
1243 OUT_CB_REG(R300_SC_CLIP_RULE, clip_rule);
1244 OUT_CB_REG_SEQ(R300_GA_POINT_S0, 4);
1245 OUT_CB_32F(point_texcoord_left);
1246 OUT_CB_32F(point_texcoord_bottom);
1247 OUT_CB_32F(point_texcoord_right);
1248 OUT_CB_32F(point_texcoord_top);
1249 END_CB;
1250
1251 /* Build the two command buffers for polygon offset setup. */
1252 if (polygon_offset_enable) {
1253 float scale = state->offset_scale * 12;
1254 float offset = state->offset_units * 4;
1255
1256 BEGIN_CB(rs->cb_poly_offset_zb16, 5);
1257 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1258 OUT_CB_32F(scale);
1259 OUT_CB_32F(offset);
1260 OUT_CB_32F(scale);
1261 OUT_CB_32F(offset);
1262 END_CB;
1263
1264 offset = state->offset_units * 2;
1265
1266 BEGIN_CB(rs->cb_poly_offset_zb24, 5);
1267 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4);
1268 OUT_CB_32F(scale);
1269 OUT_CB_32F(offset);
1270 OUT_CB_32F(scale);
1271 OUT_CB_32F(offset);
1272 END_CB;
1273 }
1274
1275 return (void*)rs;
1276 }
1277
1278 /* Bind rasterizer state. */
1279 static void r300_bind_rs_state(struct pipe_context* pipe, void* state)
1280 {
1281 struct r300_context* r300 = r300_context(pipe);
1282 struct r300_rs_state* rs = (struct r300_rs_state*)state;
1283 int last_sprite_coord_enable = r300->sprite_coord_enable;
1284 boolean last_two_sided_color = r300->two_sided_color;
1285 boolean last_msaa_enable = r300->msaa_enable;
1286
1287 if (r300->draw && rs) {
1288 draw_set_rasterizer_state(r300->draw, &rs->rs_draw, state);
1289 }
1290
1291 if (rs) {
1292 r300->polygon_offset_enabled = rs->polygon_offset_enable;
1293 r300->sprite_coord_enable = rs->rs.sprite_coord_enable;
1294 r300->two_sided_color = rs->rs.light_twoside;
1295 r300->msaa_enable = rs->rs.multisample;
1296 } else {
1297 r300->polygon_offset_enabled = FALSE;
1298 r300->sprite_coord_enable = 0;
1299 r300->two_sided_color = FALSE;
1300 r300->msaa_enable = FALSE;
1301 }
1302
1303 UPDATE_STATE(state, r300->rs_state);
1304 r300->rs_state.size = RS_STATE_MAIN_SIZE + (r300->polygon_offset_enabled ? 5 : 0);
1305
1306 if (last_sprite_coord_enable != r300->sprite_coord_enable ||
1307 last_two_sided_color != r300->two_sided_color) {
1308 r300_mark_atom_dirty(r300, &r300->rs_block_state);
1309 }
1310
1311 if (last_msaa_enable != r300->msaa_enable) {
1312 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined);
1313
1314 if (r300->alpha_to_coverage) {
1315 r300_mark_atom_dirty(r300, &r300->dsa_state);
1316 }
1317
1318 if (r300->alpha_to_one &&
1319 r300->fs_status == FRAGMENT_SHADER_VALID) {
1320 r300->fs_status = FRAGMENT_SHADER_MAYBE_DIRTY;
1321 }
1322 }
1323 }
1324
1325 /* Free rasterizer state. */
1326 static void r300_delete_rs_state(struct pipe_context* pipe, void* state)
1327 {
1328 FREE(state);
1329 }
1330
1331 static void*
1332 r300_create_sampler_state(struct pipe_context* pipe,
1333 const struct pipe_sampler_state* state)
1334 {
1335 struct r300_context* r300 = r300_context(pipe);
1336 struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state);
1337 boolean is_r500 = r300->screen->caps.is_r500;
1338 int lod_bias;
1339
1340 sampler->state = *state;
1341
1342 /* r300 doesn't handle CLAMP and MIRROR_CLAMP correctly when either MAG
1343 * or MIN filter is NEAREST. Since texwrap produces the same results
1344 * for CLAMP and CLAMP_TO_EDGE, we use the *_TO_EDGE wrap modes instead. */
1345 if (sampler->state.min_img_filter == PIPE_TEX_FILTER_NEAREST ||
1346 sampler->state.mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
1347 /* Wrap S. */
1348 if (sampler->state.wrap_s == PIPE_TEX_WRAP_CLAMP)
1349 sampler->state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1350 else if (sampler->state.wrap_s == PIPE_TEX_WRAP_MIRROR_CLAMP)
1351 sampler->state.wrap_s = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1352
1353 /* Wrap T. */
1354 if (sampler->state.wrap_t == PIPE_TEX_WRAP_CLAMP)
1355 sampler->state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1356 else if (sampler->state.wrap_t == PIPE_TEX_WRAP_MIRROR_CLAMP)
1357 sampler->state.wrap_t = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1358
1359 /* Wrap R. */
1360 if (sampler->state.wrap_r == PIPE_TEX_WRAP_CLAMP)
1361 sampler->state.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
1362 else if (sampler->state.wrap_r == PIPE_TEX_WRAP_MIRROR_CLAMP)
1363 sampler->state.wrap_r = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
1364 }
1365
1366 sampler->filter0 |=
1367 (r300_translate_wrap(sampler->state.wrap_s) << R300_TX_WRAP_S_SHIFT) |
1368 (r300_translate_wrap(sampler->state.wrap_t) << R300_TX_WRAP_T_SHIFT) |
1369 (r300_translate_wrap(sampler->state.wrap_r) << R300_TX_WRAP_R_SHIFT);
1370
1371 sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter,
1372 state->mag_img_filter,
1373 state->min_mip_filter,
1374 state->max_anisotropy > 1);
1375
1376 sampler->filter0 |= r300_anisotropy(state->max_anisotropy);
1377
1378 /* Unfortunately, r300-r500 don't support floating-point mipmap lods. */
1379 /* We must pass these to the merge function to clamp them properly. */
1380 sampler->min_lod = (unsigned)MAX2(state->min_lod, 0);
1381 sampler->max_lod = (unsigned)MAX2(ceilf(state->max_lod), 0);
1382
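/* The LOD bias appears to be a signed fixed-point value with 5 fractional
 * bits (hence the * 32), clamped to the signed 10-bit range [-512, 511]
 * of the R300_LOD_BIAS field. */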
1383 lod_bias = CLAMP((int)(state->lod_bias * 32 + 1), -(1 << 9), (1 << 9) - 1);
1384
1385 sampler->filter1 |= (lod_bias << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
1386
1387 /* This is very high quality anisotropic filtering for R5xx.
1388 * It's good for benchmarking texturing performance, but we don't
1389 * enable it by default because it's a serious performance killer.
1390 * Feel free to play with it. */
1391 if (DBG_ON(r300, DBG_ANISOHQ) && is_r500) {
1392 sampler->filter1 |= r500_anisotropy(state->max_anisotropy);
1393 }
1394
1395 /* R500-specific fixups and optimizations */
1396 if (r300->screen->caps.is_r500) {
1397 sampler->filter1 |= R500_BORDER_FIX;
1398 }
1399
1400 return (void*)sampler;
1401 }
1402
1403 static void r300_bind_sampler_states(struct pipe_context* pipe,
1404 unsigned count,
1405 void** states)
1406 {
1407 struct r300_context* r300 = r300_context(pipe);
1408 struct r300_textures_state* state =
1409 (struct r300_textures_state*)r300->textures_state.state;
1410 unsigned tex_units = r300->screen->caps.num_tex_units;
1411
1412 if (count > tex_units) {
1413 return;
1414 }
1415
1416 memcpy(state->sampler_states, states, sizeof(void*) * count);
1417 state->sampler_state_count = count;
1418
1419 r300_mark_atom_dirty(r300, &r300->textures_state);
1420 }
1421
1422 static void r300_lacks_vertex_textures(struct pipe_context* pipe,
1423 unsigned count,
1424 void** states)
1425 {
1426 }
1427
1428 static void r300_delete_sampler_state(struct pipe_context* pipe, void* state)
1429 {
1430 FREE(state);
1431 }
1432
1433 static uint32_t r300_assign_texture_cache_region(unsigned index, unsigned num)
1434 {
1435 /* This looks like a hack, but I believe it's supposed to work like
1436 * this. To illustrate, let's assume you have 5 textures.
1437 * From the docs, 5 and the successive numbers are:
1438 *
1439 * FOURTH_1 = 5
1440 * FOURTH_2 = 6
1441 * FOURTH_3 = 7
1442 * EIGHTH_0 = 8
1443 * EIGHTH_1 = 9
1444 *
1445 * The first 3 textures get 3/4 of the cache, divided evenly
1446 * between them. The remaining 1/4 of the cache must be divided between
1447 * the last 2 textures, so each of them gets 1/8 of the cache.
1448 * So why not just use "5 + texture_index"?
1449 *
1450 * This simple trick works for all "num" <= 16.
1451 */
1452 if (num <= 1)
1453 return R300_TX_CACHE(R300_TX_CACHE_WHOLE);
1454 else
1455 return R300_TX_CACHE(num + index);
1456 }
1457
1458 static void r300_set_fragment_sampler_views(struct pipe_context* pipe,
1459 unsigned count,
1460 struct pipe_sampler_view** views)
1461 {
1462 struct r300_context* r300 = r300_context(pipe);
1463 struct r300_textures_state* state =
1464 (struct r300_textures_state*)r300->textures_state.state;
1465 struct r300_resource *texture;
1466 unsigned i, real_num_views = 0, view_index = 0;
1467 unsigned tex_units = r300->screen->caps.num_tex_units;
1468 boolean dirty_tex = FALSE;
1469
1470 if (count > tex_units) {
1471 return;
1472 }
1473
1474 /* Calculate the real number of views. */
1475 for (i = 0; i < count; i++) {
1476 if (views[i])
1477 real_num_views++;
1478 }
1479
1480 for (i = 0; i < count; i++) {
1481 pipe_sampler_view_reference(
1482 (struct pipe_sampler_view**)&state->sampler_views[i],
1483 views[i]);
1484
1485 if (!views[i]) {
1486 continue;
1487 }
1488
1489 /* A new sampler view (= texture)... */
1490 dirty_tex = TRUE;
1491
1492 /* Set the texrect factor in the fragment shader.
1493 * Needed for RECT and NPOT fallback. */
1494 texture = r300_resource(views[i]->texture);
1495 if (texture->tex.is_npot) {
1496 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1497 }
1498
1499 state->sampler_views[i]->texcache_region =
1500 r300_assign_texture_cache_region(view_index, real_num_views);
1501 view_index++;
1502 }
1503
1504 for (i = count; i < tex_units; i++) {
1505 if (state->sampler_views[i]) {
1506 pipe_sampler_view_reference(
1507 (struct pipe_sampler_view**)&state->sampler_views[i],
1508 NULL);
1509 }
1510 }
1511
1512 state->sampler_view_count = count;
1513
1514 r300_mark_atom_dirty(r300, &r300->textures_state);
1515
1516 if (dirty_tex) {
1517 r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
1518 }
1519 }
1520
1521 struct pipe_sampler_view *
1522 r300_create_sampler_view_custom(struct pipe_context *pipe,
1523 struct pipe_resource *texture,
1524 const struct pipe_sampler_view *templ,
1525 unsigned width0_override,
1526 unsigned height0_override)
1527 {
1528 struct r300_sampler_view *view = CALLOC_STRUCT(r300_sampler_view);
1529 struct r300_resource *tex = r300_resource(texture);
1530 boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500;
1531 boolean dxtc_swizzle = r300_screen(pipe->screen)->caps.dxtc_swizzle;
1532
1533 if (view) {
1534 unsigned hwformat;
1535
1536 view->base = *templ;
1537 view->base.reference.count = 1;
1538 view->base.context = pipe;
1539 view->base.texture = NULL;
1540 pipe_resource_reference(&view->base.texture, texture);
1541
1542 view->width0_override = width0_override;
1543 view->height0_override = height0_override;
1544 view->swizzle[0] = templ->swizzle_r;
1545 view->swizzle[1] = templ->swizzle_g;
1546 view->swizzle[2] = templ->swizzle_b;
1547 view->swizzle[3] = templ->swizzle_a;
1548
1549 hwformat = r300_translate_texformat(templ->format,
1550 view->swizzle,
1551 is_r500,
1552 dxtc_swizzle);
1553
1554 if (hwformat == ~0) {
1555 fprintf(stderr, "r300: Ooops. Got unsupported format %s in %s.\n",
1556 util_format_short_name(templ->format), __func__);
1557 }
1558 assert(hwformat != ~0);
1559
1560 r300_texture_setup_format_state(r300_screen(pipe->screen), tex,
1561 templ->format, 0,
1562 width0_override, height0_override,
1563 &view->format);
1564 view->format.format1 |= hwformat;
1565 if (is_r500) {
1566 view->format.format2 |= r500_tx_format_msb_bit(templ->format);
1567 }
1568 }
1569
1570 return (struct pipe_sampler_view*)view;
1571 }
1572
1573 static struct pipe_sampler_view *
1574 r300_create_sampler_view(struct pipe_context *pipe,
1575 struct pipe_resource *texture,
1576 const struct pipe_sampler_view *templ)
1577 {
1578 return r300_create_sampler_view_custom(pipe, texture, templ,
1579 r300_resource(texture)->tex.width0,
1580 r300_resource(texture)->tex.height0);
1581 }
1582
1583
1584 static void
1585 r300_sampler_view_destroy(struct pipe_context *pipe,
1586 struct pipe_sampler_view *view)
1587 {
1588 pipe_resource_reference(&view->texture, NULL);
1589 FREE(view);
1590 }
1591
1592 static void r300_set_sample_mask(struct pipe_context *pipe,
1593 unsigned mask)
1594 {
1595 struct r300_context* r300 = r300_context(pipe);
1596
1597 *((unsigned*)r300->sample_mask.state) = mask;
1598
1599 r300_mark_atom_dirty(r300, &r300->sample_mask);
1600 }
1601
1602 static void r300_set_scissor_state(struct pipe_context* pipe,
1603 const struct pipe_scissor_state* state)
1604 {
1605 struct r300_context* r300 = r300_context(pipe);
1606
1607 memcpy(r300->scissor_state.state, state,
1608 sizeof(struct pipe_scissor_state));
1609
1610 r300_mark_atom_dirty(r300, &r300->scissor_state);
1611 }
1612
1613 static void r300_set_viewport_state(struct pipe_context* pipe,
1614 const struct pipe_viewport_state* state)
1615 {
1616 struct r300_context* r300 = r300_context(pipe);
1617 struct r300_viewport_state* viewport =
1618 (struct r300_viewport_state*)r300->viewport_state.state;
1619
1620 r300->viewport = *state;
1621
1622 if (r300->draw) {
1623 draw_set_viewport_state(r300->draw, state);
1624 viewport->vte_control = R300_VTX_XY_FMT | R300_VTX_Z_FMT;
1625 return;
1626 }
1627
1628 /* Do the transform in HW. */
1629 viewport->vte_control = R300_VTX_W0_FMT;
1630
1631 if (state->scale[0] != 1.0f) {
1632 viewport->xscale = state->scale[0];
1633 viewport->vte_control |= R300_VPORT_X_SCALE_ENA;
1634 }
1635 if (state->scale[1] != 1.0f) {
1636 viewport->yscale = state->scale[1];
1637 viewport->vte_control |= R300_VPORT_Y_SCALE_ENA;
1638 }
1639 if (state->scale[2] != 1.0f) {
1640 viewport->zscale = state->scale[2];
1641 viewport->vte_control |= R300_VPORT_Z_SCALE_ENA;
1642 }
1643 if (state->translate[0] != 0.0f) {
1644 viewport->xoffset = state->translate[0];
1645 viewport->vte_control |= R300_VPORT_X_OFFSET_ENA;
1646 }
1647 if (state->translate[1] != 0.0f) {
1648 viewport->yoffset = state->translate[1];
1649 viewport->vte_control |= R300_VPORT_Y_OFFSET_ENA;
1650 }
1651 if (state->translate[2] != 0.0f) {
1652 viewport->zoffset = state->translate[2];
1653 viewport->vte_control |= R300_VPORT_Z_OFFSET_ENA;
1654 }
1655
1656 r300_mark_atom_dirty(r300, &r300->viewport_state);
1657 if (r300->fs.state && r300_fs(r300)->shader &&
1658 r300_fs(r300)->shader->inputs.wpos != ATTR_UNUSED) {
1659 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state);
1660 }
1661 }
1662
static void r300_set_vertex_buffers_hwtcl(struct pipe_context* pipe,
                                          unsigned start_slot, unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers,
                                  buffers, start_slot, count);

    /* There must be at least one vertex buffer set, otherwise it locks up. */
    if (!r300->nr_vertex_buffers) {
        util_set_vertex_buffers_count(r300->vertex_buffer,
                                      &r300->nr_vertex_buffers,
                                      &r300->dummy_vb, 0, 1);
    }

    r300->vertex_arrays_dirty = TRUE;
}

static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
                                          unsigned start_slot, unsigned count,
                                          const struct pipe_vertex_buffer* buffers)
{
    struct r300_context* r300 = r300_context(pipe);
    unsigned i;

    util_set_vertex_buffers_count(r300->vertex_buffer,
                                  &r300->nr_vertex_buffers,
                                  buffers, start_slot, count);
    draw_set_vertex_buffers(r300->draw, start_slot, count, buffers);

    if (!buffers)
        return;

    for (i = 0; i < count; i++) {
        if (buffers[i].user_buffer) {
            draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
                                          buffers[i].user_buffer);
        } else if (buffers[i].buffer) {
            draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
                                          r300_resource(buffers[i].buffer)->malloced_buffer);
        }
    }
}

static void r300_set_index_buffer_hwtcl(struct pipe_context* pipe,
                                        const struct pipe_index_buffer *ib)
{
    struct r300_context* r300 = r300_context(pipe);

    if (ib) {
        pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
        memcpy(&r300->index_buffer, ib, sizeof(*ib));
    } else {
        pipe_resource_reference(&r300->index_buffer.buffer, NULL);
    }
}

static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
                                        const struct pipe_index_buffer *ib)
{
    struct r300_context* r300 = r300_context(pipe);

    if (ib) {
        const void *buf = NULL;
        if (ib->user_buffer) {
            buf = ib->user_buffer;
        } else if (ib->buffer) {
            buf = r300_resource(ib->buffer)->malloced_buffer;
        }
        draw_set_indexes(r300->draw,
                         (const ubyte *) buf + ib->offset,
                         ib->index_size);
    }
}

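/* Packing example for the routine below (assumed input, for illustration
 * only): with two vertex elements, say a float3 position and a float2
 * texcoord, element 0 lands in the low 16 bits and element 1 in the high
 * 16 bits of vap_prog_stream_cntl[0] (likewise for the swizzles in
 * vap_prog_stream_cntl_ext[0]).  The final element also gets R300_LAST_VEC
 * or'd in, and vstream->count ends up as 1, i.e. a single PROG_STREAM_CNTL
 * register pair is emitted. */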
/* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{
    struct r300_vertex_stream_state *vstream = &velems->vertex_stream;
    uint16_t type, swizzle;
    enum pipe_format format;
    unsigned i;

    /* Vertex shaders have no semantics on their inputs,
     * so PSC should just route stuff based on the vertex elements,
     * and not on attrib information. */
    for (i = 0; i < velems->count; i++) {
        format = velems->velem[i].src_format;

        type = r300_translate_vertex_data_type(format);
        if (type == R300_INVALID_FORMAT) {
            fprintf(stderr, "r300: Bad vertex format %s.\n",
                    util_format_short_name(format));
            assert(0);
            abort();
        }

        type |= i << R300_DST_VEC_LOC_SHIFT;
        swizzle = r300_translate_vertex_data_swizzle(format);

        if (i & 1) {
            vstream->vap_prog_stream_cntl[i >> 1] |= type << 16;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle << 16;
        } else {
            vstream->vap_prog_stream_cntl[i >> 1] |= type;
            vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle;
        }
    }

    /* Set the last vector in the PSC. */
    if (i) {
        i -= 1;
    }
    vstream->vap_prog_stream_cntl[i >> 1] |=
        (R300_LAST_VEC << (i & 1 ? 16 : 0));

    vstream->count = (i >> 1) + 1;
}

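/* Sizing example for the HW TCL path below (assumed formats, for
 * illustration only): PIPE_FORMAT_R32G32B32_FLOAT has a 12-byte block,
 * which stays 12 after align(.., 4), i.e. 3 dwords, and
 * PIPE_FORMAT_R32G32_FLOAT contributes 2 dwords, so a {float3 position,
 * float2 texcoord} layout yields vertex_size_dwords == 5.  The SW TCL
 * (draw module) path skips this bookkeeping entirely. */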
static void* r300_create_vertex_elements_state(struct pipe_context* pipe,
                                               unsigned count,
                                               const struct pipe_vertex_element* attribs)
{
    struct r300_vertex_element_state *velems;
    unsigned i;
    struct pipe_vertex_element dummy_attrib = {0};

    /* R300 Programmable Stream Control (PSC) doesn't support 0 vertex elements. */
    if (!count) {
        dummy_attrib.src_format = PIPE_FORMAT_R8G8B8A8_UNORM;
        attribs = &dummy_attrib;
        count = 1;
    } else if (count > 16) {
        fprintf(stderr, "r300: More than 16 vertex elements are not supported,"
                " requested %i, using 16.\n", count);
        count = 16;
    }

    velems = CALLOC_STRUCT(r300_vertex_element_state);
    if (!velems)
        return NULL;

    velems->count = count;
    memcpy(velems->velem, attribs, sizeof(struct pipe_vertex_element) * count);

    if (r300_screen(pipe->screen)->caps.has_tcl) {
        /* Setup PSC.
         * The unused components will be replaced by (..., 0, 1). */
        r300_vertex_psc(velems);

        for (i = 0; i < count; i++) {
            velems->format_size[i] =
                align(util_format_get_blocksize(velems->velem[i].src_format), 4);
            velems->vertex_size_dwords += velems->format_size[i] / 4;
        }
    }

    return velems;
}

static void r300_bind_vertex_elements_state(struct pipe_context *pipe,
                                            void *state)
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_vertex_element_state *velems = state;

    if (velems == NULL) {
        return;
    }

    r300->velems = velems;

    if (r300->draw) {
        draw_set_vertex_elements(r300->draw, velems->count, velems->velem);
        return;
    }

    UPDATE_STATE(&velems->vertex_stream, r300->vertex_stream_state);
    r300->vertex_stream_state.size = (1 + velems->vertex_stream.count) * 2;
    r300->vertex_arrays_dirty = TRUE;
}

static void r300_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
    FREE(state);
}

static void* r300_create_vs_state(struct pipe_context* pipe,
                                  const struct pipe_shader_state* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);

    /* Copy state directly into shader. */
    vs->state = *shader;
    vs->state.tokens = tgsi_dup_tokens(shader->tokens);

    if (r300->screen->caps.has_tcl) {
        r300_init_vs_outputs(r300, vs);
        r300_translate_vertex_shader(r300, vs);
    } else {
        r300_draw_init_vertex_shader(r300, vs);
    }

    return vs;
}

static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (vs == NULL) {
        r300->vs_state.state = NULL;
        return;
    }
    if (vs == r300->vs_state.state) {
        return;
    }
    r300->vs_state.state = vs;

    /* The majority of the RS block bits are dependent on the vertex shader. */
    r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */

    if (r300->screen->caps.has_tcl) {
        unsigned fc_op_dwords = r300->screen->caps.is_r500 ? 3 : 2;
        r300_mark_atom_dirty(r300, &r300->vs_state);
        r300->vs_state.size = vs->code.length + 9 +
            (R300_VS_MAX_FC_OPS * fc_op_dwords + 4);

        r300_mark_atom_dirty(r300, &r300->vs_constants);
        r300->vs_constants.size =
            2 +
            (vs->externals_count ? vs->externals_count * 4 + 3 : 0) +
            (vs->immediates_count ? vs->immediates_count * 4 + 3 : 0);

        ((struct r300_constant_buffer*)r300->vs_constants.state)->remap_table =
            vs->code.constants_remap_table;

        r300_mark_atom_dirty(r300, &r300->pvs_flush);
    } else {
        draw_bind_vertex_shader(r300->draw,
                                (struct draw_vertex_shader*)vs->draw_vs);
    }
}

static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (r300->screen->caps.has_tcl) {
        rc_constants_destroy(&vs->code.constants);
        FREE(vs->code.constants_remap_table);
    } else {
        draw_delete_vertex_shader(r300->draw,
                                  (struct draw_vertex_shader*)vs->draw_vs);
    }

    FREE((void*)vs->state.tokens);
    FREE(shader);
}

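/* Hedged summary of the vertex-shader constant windowing below (numbers are
 * an example only): on HW TCL chips each newly set VS constant buffer is
 * placed at the current vs_const_base, which then advances by the shader's
 * constant count.  A shader with 20 constants set three times in a row would
 * use bases 0, 20 and 40.  Once the base would exceed
 * R500_MAX_PVS_CONST_VECS it wraps back to 0 and the PVS is flushed so the
 * window can be reused. */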
static void r300_set_constant_buffer(struct pipe_context *pipe,
                                     uint shader, uint index,
                                     struct pipe_constant_buffer *cb)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_constant_buffer *cbuf;
    uint32_t *mapped;

    if (!cb)
        return;

    switch (shader) {
        case PIPE_SHADER_VERTEX:
            cbuf = (struct r300_constant_buffer*)r300->vs_constants.state;
            break;
        case PIPE_SHADER_FRAGMENT:
            cbuf = (struct r300_constant_buffer*)r300->fs_constants.state;
            break;
        default:
            return;
    }

    if (cb->user_buffer)
        mapped = (uint32_t*)cb->user_buffer;
    else {
        struct r300_resource *rbuf = r300_resource(cb->buffer);

        if (rbuf && rbuf->malloced_buffer)
            mapped = (uint32_t*)rbuf->malloced_buffer;
        else
            return;
    }

    if (shader == PIPE_SHADER_FRAGMENT ||
        (shader == PIPE_SHADER_VERTEX && r300->screen->caps.has_tcl)) {
        cbuf->ptr = mapped;
    }

    if (shader == PIPE_SHADER_VERTEX) {
        if (r300->screen->caps.has_tcl) {
            struct r300_vertex_shader *vs =
                (struct r300_vertex_shader*)r300->vs_state.state;

            if (!vs) {
                cbuf->buffer_base = 0;
                return;
            }

            cbuf->buffer_base = r300->vs_const_base;
            r300->vs_const_base += vs->code.constants.Count;
            if (r300->vs_const_base > R500_MAX_PVS_CONST_VECS) {
                r300->vs_const_base = vs->code.constants.Count;
                cbuf->buffer_base = 0;
                r300_mark_atom_dirty(r300, &r300->pvs_flush);
            }
            r300_mark_atom_dirty(r300, &r300->vs_constants);
        } else if (r300->draw) {
            draw_set_mapped_constant_buffer(r300->draw, PIPE_SHADER_VERTEX,
                                            0, mapped, cb->buffer_size);
        }
    } else if (shader == PIPE_SHADER_FRAGMENT) {
        r300_mark_atom_dirty(r300, &r300->fs_constants);
    }
}

static void r300_texture_barrier(struct pipe_context *pipe)
{
    struct r300_context *r300 = r300_context(pipe);

    r300_mark_atom_dirty(r300, &r300->gpu_flush);
    r300_mark_atom_dirty(r300, &r300->texture_cache_inval);
}

void r300_init_state_functions(struct r300_context* r300)
{
    r300->context.create_blend_state = r300_create_blend_state;
    r300->context.bind_blend_state = r300_bind_blend_state;
    r300->context.delete_blend_state = r300_delete_blend_state;

    r300->context.set_blend_color = r300_set_blend_color;

    r300->context.set_clip_state = r300_set_clip_state;
    r300->context.set_sample_mask = r300_set_sample_mask;

    r300->context.set_constant_buffer = r300_set_constant_buffer;

    r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
    r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
    r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;

    r300->context.set_stencil_ref = r300_set_stencil_ref;

    r300->context.set_framebuffer_state = r300_set_framebuffer_state;

    r300->context.create_fs_state = r300_create_fs_state;
    r300->context.bind_fs_state = r300_bind_fs_state;
    r300->context.delete_fs_state = r300_delete_fs_state;

    r300->context.set_polygon_stipple = r300_set_polygon_stipple;

    r300->context.create_rasterizer_state = r300_create_rs_state;
    r300->context.bind_rasterizer_state = r300_bind_rs_state;
    r300->context.delete_rasterizer_state = r300_delete_rs_state;

    r300->context.create_sampler_state = r300_create_sampler_state;
    r300->context.bind_fragment_sampler_states = r300_bind_sampler_states;
    r300->context.bind_vertex_sampler_states = r300_lacks_vertex_textures;
    r300->context.delete_sampler_state = r300_delete_sampler_state;

    r300->context.set_fragment_sampler_views = r300_set_fragment_sampler_views;
    r300->context.create_sampler_view = r300_create_sampler_view;
    r300->context.sampler_view_destroy = r300_sampler_view_destroy;

    r300->context.set_scissor_state = r300_set_scissor_state;

    r300->context.set_viewport_state = r300_set_viewport_state;

    if (r300->screen->caps.has_tcl) {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
        r300->context.set_index_buffer = r300_set_index_buffer_hwtcl;
    } else {
        r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
        r300->context.set_index_buffer = r300_set_index_buffer_swtcl;
    }

    r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
    r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
    r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;

    r300->context.create_vs_state = r300_create_vs_state;
    r300->context.bind_vs_state = r300_bind_vs_state;
    r300->context.delete_vs_state = r300_delete_vs_state;

    r300->context.texture_barrier = r300_texture_barrier;
}