/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"

#include "radeon_mipmap_tree.h"

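/* Emit SQ_TEX_RESOURCE state for every enabled texture unit.  Textures
 * fetched by the vertex shader go into the VS fetch-resource range
 * (offset by VERT_ATTRIB_MAX slots); all others use the PS range.
 * Relocations are emitted for SQ_TEX_RESOURCE2/3, the words that
 * reference the texture BO.
 */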
static void r700SendTexState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct r700_vertex_program *vp = context->selected_vp;

    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                } else {
                    bo = t->bo;
                }
                if (bo) {

                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));

                    if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                        /* vs texture */
                        R600_OUT_BATCH((i + VERT_ATTRIB_MAX + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
                    } else {
                        /* ps texture */
                        R600_OUT_BATCH(i * FETCH_RESOURCE_STRIDE);
                    }

                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE2,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

#define SAMPLER_STRIDE 3

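/* Emit the three SQ_TEX_SAMPLER words for each enabled texture unit,
 * again separating vertex-shader samplers from pixel-shader samplers.
 */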
static void r700SendTexSamplerState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;

    struct r700_vertex_program *vp = context->selected_vp;

    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));

                if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                    /* vs texture */
                    R600_OUT_BATCH((i + SQ_TEX_SAMPLER_VS_OFFSET) * SAMPLER_STRIDE);
                } else {
                    /* ps texture */
                    R600_OUT_BATCH(i * SAMPLER_STRIDE);
                }

                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

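/* Emit the RGBA border color registers for each enabled texture unit. */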
static void r700SendTexBorderColorState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

extern int getTypeSize(GLenum type);
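/* Build one SQ_VTX_CONSTANT fetch resource for a vertex stream: pack the
 * base offset, size, stride, data format, endian swap and number format
 * into the constant words, then emit them with a relocation against the
 * vertex buffer BO.
 */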
static void r700SetupVTXConstants(struct gl_context *ctx,
                                  void *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = paos->bo->size - paos->offset - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back the API for the initial data type, not only GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0,
#ifdef MESA_BIG_ENDIAN
             SQ_ENDIAN_8IN32,
#else
             SQ_ENDIAN_NONE,
#endif
             SQ_VTX_CONSTANT_WORD2_0__ENDIAN_SWAP_shift,
             SQ_VTX_CONSTANT_WORD2_0__ENDIAN_SWAP_mask);

    if (GL_TRUE == pStreamDesc->normalize) {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    } else {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }

    if (1 == pStreamDesc->_signed) {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

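/* Emit a vertex fetch constant for each vertex attribute the current
 * vertex program actually reads.
 */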
static void r700SendVTXState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    (void) b_l_rmesa; /* silence unused var warning */
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

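/* Derive the CB_COLOR0_* state for render target 'id' from the current
 * color renderbuffer: base address, pitch/slice sizes, array mode, and
 * the format/comp_swap/number_type triple for the Mesa surface format.
 */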
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    uint32_t format = COLOR_8_8_8_8, comp_swap = SWAP_ALT, number_type = NUMBER_UNORM;
    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel, height;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset / 256;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    if (context->radeon.radeonScreen->driScreen->dri2.enabled) {
        height = rrb->base.Height;
    } else {
        height = context->radeon.radeonScreen->driScreen->fbHeight;
    }

    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, ((nPitchInPixel * height) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);

    switch (rrb->base.Format) {
    case MESA_FORMAT_RGBA8888:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_SIGNED_RGBA8888:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_SNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGBA8888_REV:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_SIGNED_RGBA8888_REV:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_STD;
        number_type = NUMBER_SNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB8888:
    case MESA_FORMAT_XRGB8888:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB8888_REV:
    case MESA_FORMAT_XRGB8888_REV:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGB565:
        format = COLOR_5_6_5;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGB565_REV:
        format = COLOR_5_6_5;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB4444:
        format = COLOR_4_4_4_4;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB4444_REV:
        format = COLOR_4_4_4_4;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB1555:
        format = COLOR_1_5_5_5;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ARGB1555_REV:
        format = COLOR_1_5_5_5;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_AL88:
        format = COLOR_8_8;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_AL88_REV:
        format = COLOR_8_8;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGB332:
        format = COLOR_3_3_2;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_A8:
        format = COLOR_8;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_I8:
        format = COLOR_8;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_L8:
        format = COLOR_8;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_UNORM;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGBA_FLOAT32:
        format = COLOR_32_32_32_32_FLOAT;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_FLOAT;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_RGBA_FLOAT16:
        format = COLOR_16_16_16_16_FLOAT;
        comp_swap = SWAP_STD_REV;
        number_type = NUMBER_FLOAT;
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ALPHA_FLOAT32:
        format = COLOR_32_FLOAT;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_FLOAT;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_ALPHA_FLOAT16:
        format = COLOR_16_FLOAT;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_FLOAT;
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_LUMINANCE_FLOAT32:
        format = COLOR_32_FLOAT;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_FLOAT;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_LUMINANCE_FLOAT16:
        format = COLOR_16_FLOAT;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_FLOAT;
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32:
        format = COLOR_32_32_FLOAT;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_FLOAT;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16:
        format = COLOR_16_16_FLOAT;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_FLOAT;
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_INTENSITY_FLOAT32: /* X, X, X, X */
        format = COLOR_32_FLOAT;
        comp_swap = SWAP_STD;
        number_type = NUMBER_FLOAT;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_INTENSITY_FLOAT16: /* X, X, X, X */
        format = COLOR_16_FLOAT;
        comp_swap = SWAP_STD;
        number_type = NUMBER_FLOAT;
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_X8_Z24:
    case MESA_FORMAT_S8_Z24:
        format = COLOR_8_24;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_1D_TILED_THIN1,
                 CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_Z24_S8:
        format = COLOR_24_8;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_1D_TILED_THIN1,
                 CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_Z16:
        format = COLOR_16;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_1D_TILED_THIN1,
                 CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_Z32:
        format = COLOR_32;
        comp_swap = SWAP_STD;
        number_type = NUMBER_UNORM;
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_1D_TILED_THIN1,
                 CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_SARGB8:
        format = COLOR_8_8_8_8;
        comp_swap = SWAP_ALT;
        number_type = NUMBER_SRGB;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_SLA8:
        format = COLOR_8_8;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_SRGB;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    case MESA_FORMAT_SL8:
        format = COLOR_8;
        comp_swap = SWAP_ALT_REV;
        number_type = NUMBER_SRGB;
        SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
        break;
    default:
        _mesa_problem(context->radeon.glCtx, "unexpected format in r700SetRenderTarget()");
        break;
    }

    /* BLEND_FLOAT32 must be 0 on r7xx */
    if (context->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV770)
        CLEARbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_FLOAT32_bit);

    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, format,
             CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, comp_swap,
             COMP_SWAP_shift, COMP_SWAP_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, number_type,
             NUMBER_TYPE_shift, NUMBER_TYPE_mask);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);

    r700->render_target[id].enabled = GL_TRUE;
}

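/* Derive the DB_DEPTH_* state from the current depth renderbuffer. */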
static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel, height;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    if (context->radeon.radeonScreen->driScreen->dri2.enabled) {
        height = rrb->base.Height;
    } else {
        height = context->radeon.radeonScreen->driScreen->fbHeight;
    }

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All, ((nPitchInPixel * height) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp) {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    } else {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z-buffer may be much bigger than needed, so use the actual height used. */
}

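/* Emit the depth target registers with relocations against the depth BO.
 * On R6xx parts newer than the original R600 a SURFACE_BASE_UPDATE packet
 * follows, presumably so the kernel checker picks up the new base.
 */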
static void r700SendDepthTargetState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(7 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 1);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();
    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_INFO, 1);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_INFO.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_INFO.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0); /* depth */
        END_BATCH();
    }

    COMMIT_BATCH();
}

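/* Emit the color target registers.  Only target 0 is emitted until MRT
 * support is done; the CMASK/FRAG/TILE surfaces are pointed at the color
 * buffer just to keep the command stream valid (see comment below).
 */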
static void r700SendRenderTargetState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(2 << id); /* color buffer 'id' */
        END_BATCH();
    }
    /* Point the CMASK & TILE buffers at the color buffer offset: since we
     * don't use them, this shouldn't cause any issues and we then have a
     * valid command stream.
     */
    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_TILE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_TILE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_TILE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();
    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_FRAG + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_FRAG.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_FRAG.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_INFO.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_INFO.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    COMMIT_BATCH();
}

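/* Emit the pixel shader: program start address (with relocation),
 * resource and export counts, and a default loop constant.
 */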
static void r700SendPSState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    struct radeon_bo *pbo_const;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    pbo_const = (struct radeon_bo *)r700GetActiveFpShaderConstBo(GL_CONTEXT(context));
    (void) pbo_const; /* TODO: set up shader constants */

    COMMIT_BATCH();
}

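/* Emit the vertex shader: program start address (with relocation),
 * resources, a default loop constant, and, when constants live in a
 * memory buffer, the ALU constant-cache registers.
 */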
static void r700SendVSState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    struct radeon_bo *pbo_const;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + 32*4), 0x0100000F);
    /* R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + (SQ_LOOP_CONST_vs << 2)), 0x0100000F); */
    END_BATCH();

    /* TODO: handle 4 buffers */
    if (GL_TRUE == r700->bShaderUseMemConstant) {
        pbo_const = (struct radeon_bo *)r700GetActiveVpShaderConstBo(GL_CONTEXT(context));
        if (NULL != pbo_const) {
            r700SyncSurf(context, pbo_const, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit); /* TODO: check the kc bit. */

            BEGIN_BATCH_NO_AUTOSTATE(3);
            R600_OUT_BATCH_REGVAL(SQ_ALU_CONST_BUFFER_SIZE_VS_0, (r700->vs.num_consts * 4) / 16);
            END_BATCH();

            BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
            R600_OUT_BATCH_REGSEQ(SQ_ALU_CONST_CACHE_VS_0, 1);
            R600_OUT_BATCH(r700->vs.SQ_ALU_CONST_CACHE_VS_0.u32All);
            R600_OUT_BATCH_RELOC(r700->vs.SQ_ALU_CONST_CACHE_VS_0.u32All,
                                 pbo_const,
                                 r700->vs.SQ_ALU_CONST_CACHE_VS_0.u32All,
                                 RADEON_GEM_DOMAIN_GTT, 0, 0);
            END_BATCH();
        }
    }

    COMMIT_BATCH();
}

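/* Emit the fetch shader registers; see the XXX note below for why the
 * vertex shader address is reused here.
 */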
static void r700SendFSState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

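/* Emit scissor, depth range and scale/offset state for viewport 0. */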
static void r700SendViewportState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

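/* Emit the SQ configuration: GPR/thread/stack resource management and
 * the ring item sizes.
 */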
static void r700SendSQConfig(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

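/* Emit the SPI state: the 32 vertex semantic registers, the VS output
 * IDs, interpolation/fog control, and one PS input control word per
 * possible shader export.
 */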
static void r700SendSPIState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

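/* Upload the pixel and vertex shader ALU constants inline via
 * SET_ALU_CONST packets, four dwords per constant.
 */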
static void r700SendPSConsts(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

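/* Begin an occlusion query: zero the result buffer, then have the DBs
 * write their ZPASS counts into it via EVENT_WRITE.
 */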
static void r700SendQueryBegin(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(R600_EVENT_TYPE(ZPASS_DONE) | R600_EVENT_INDEX(1));
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

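/* Each check_* callback returns the number of dwords its atom will emit;
 * returning zero skips the atom for this state emit.
 */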
static int check_always(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget(), but the state size
         * is calculated before that.  Until MRTs are done, hardcode
         * target 0 as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

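/* Register a state atom: record its worst-case dword count and its
 * check/emit callbacks, then link it into the context's atom list.
 * A hypothetical use, for illustration only:
 *
 *     ALLOC_STATE(foo, always, 6, r700SendFooState);
 *
 * would register context->atoms.foo with a 6-dword budget, check_always
 * as its size callback and r700SendFooState as its emit callback.
 */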
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                              \
do {                                                                    \
    context->atoms.ATOM.cmd_size = (SZ);                                \
    context->atoms.ATOM.cmd = NULL;                                     \
    context->atoms.ATOM.name = #ATOM;                                   \
    context->atoms.ATOM.idx = 0;                                        \
    context->atoms.ATOM.check = check_##CHK;                            \
    context->atoms.ATOM.dirty = GL_FALSE;                               \
    context->atoms.ATOM.emit = (EMIT);                                  \
    context->radeon.hw.max_state_size += (SZ);                          \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
} while (0)

static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

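/* Create every state atom used by r600/r700 command-buffer emission and
 * accumulate the worst-case state size.
 */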
void r600InitAtoms(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 16, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 31, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    if (GL_TRUE == r700->bShaderUseMemConstant) {
        ALLOC_STATE(vs, always, 36, r700SendVSState);
        ALLOC_STATE(ps, always, 24, r700SendPSState); /* TODO: not implemented yet; fix later. */
    } else {
        ALLOC_STATE(vs, always, 21, r700SendVSState);
        ALLOC_STATE(ps, always, 24, r700SendPSState);
        ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
        ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    }

    ALLOC_STATE(vtx, vtx, (VERT_ATTRIB_MAX * 18), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}