/* mesa.git: src/mesa/drivers/dri/r600/r700_chip.c */
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

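/*
 * Emit texture state for every enabled texture unit: one SET_RESOURCE
 * packet of seven SQ_TEX_RESOURCE dwords per unit, followed by two
 * relocations so the kernel can patch the surface and mip base addresses
 * (resource words 2 and 3) with the real bo location.  The batch is
 * sized 9 + 4 to leave room for those two relocations.
 */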
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override)
                    bo = t->mt->bo;
                else
                    bo = t->bo;
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                    R600_OUT_BATCH(i * 7);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         0,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

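/*
 * Emit sampler state: one SET_SAMPLER packet (three SQ_TEX_SAMPLER
 * dwords) per enabled texture unit, written at sampler offset i * 3.
 */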
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

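/*
 * Emit border colors.  Each sampler owns four consecutive
 * TD_PS_SAMPLERn_BORDER_* registers (red/green/blue/alpha), and the
 * per-sampler blocks are 16 bytes apart, hence the (i * 16) offset
 * from TD_PS_SAMPLER0_BORDER_RED.
 */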
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

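/*
 * Build and emit one SQ_VTX_CONSTANT fetch resource for a vertex stream:
 * WORD0 is the buffer offset, WORD1 the buffer size minus one, WORD2
 * packs stride, data format and sign/normalize flags, and WORD6 marks
 * the resource as a valid buffer.  RV610/RV620/RS780/RS880/RV710 have
 * no vertex cache, so their vertex fetches go through the texture cache
 * and are flushed with TC_ACTION_ENA instead of VC_ACTION_ENA.
 */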
extern int getTypeSize(GLenum type);
static void r700SetupVTXConstants(GLcontext *ctx,
                                  void *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride)
    {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }
    else
    {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back to the API for the original data type instead of assuming GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize)
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed)
    {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

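/*
 * Per-draw vertex setup: zero the base-vertex and start-instance
 * control constants, then emit a fetch resource (see above) for every
 * attribute the current vertex program actually reads.
 */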
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i))
        {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

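/*
 * Derive the CB_COLOR0_* register block for render target <id> from the
 * current color renderbuffer.  PITCH_TILE_MAX and SLICE_TILE_MAX are
 * programmed in 8-pixel and 64-pixel units (hence /8 - 1 and /64 - 1);
 * the format and component swap are picked from the renderbuffer's
 * bytes per pixel (4 -> ARGB8888, otherwise RGB565).
 */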
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    /* The GPU address is patched in via the relocation emitted in
     * r700SendRenderTargetState, so program a zero base here. */
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

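/*
 * Derive the DB_DEPTH_* register block from the current depth
 * renderbuffer: 24-bit depth with 8-bit stencil when the buffer has
 * 4 bytes per pixel, 16-bit depth otherwise.  The depth surface is
 * always 1D tiled.
 */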
static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp)
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    else
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than what is actually needed, so use the height actually drawn. */
}

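/*
 * Emit the depth target registers.  DB_DEPTH_BASE is followed by a
 * relocation so the kernel rewrites it with the real buffer address.
 * R6xx parts newer than R600 but older than RV770 additionally need a
 * SURFACE_BASE_UPDATE packet (bit 0 selects the depth surface) after
 * the base changes.
 */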
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

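/*
 * Emit color target <id>: the base address with its relocation, the
 * R6xx-only SURFACE_BASE_UPDATE packet (2 << id selects color surface
 * <id>), then the remaining CB_COLOR0_* registers.  Consecutive
 * CB_COLORn registers are one dword apart, hence the (4 * id) offsets.
 */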
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

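/*
 * Emit the pixel shader: flush the shader bo through the shader cache
 * (SH_ACTION_ENA), write SQ_PGM_START_PS with a relocation into the
 * shader bo, then the GPR/export/CF-offset resource registers.  The
 * VS and FS emitters below follow the same pattern.
 */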
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to
     * make the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(23);
    R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

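/*
 * Upload the pixel shader ALU constants in one SET_ALU_CONST packet:
 * num_consts vec4s of four dwords each, starting at the PS segment of
 * the constant file (offsets are in dwords, hence the * 4).
 */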
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

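/*
 * The check_*() callbacks report how many dwords an atom's emit hook
 * will write, so the command buffer can be sized before emission;
 * returning 0 makes the atom a no-op for this state flush.
 */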
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

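/*
 * Register a state atom: record its worst-case size and its check/emit
 * hooks, add the size to the context's state budget, and queue the atom
 * on the list the common radeon code walks when emitting state.
 */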
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                              \
do {                                                                    \
    context->atoms.ATOM.cmd_size = (SZ);                                \
    context->atoms.ATOM.cmd = NULL;                                     \
    context->atoms.ATOM.name = #ATOM;                                   \
    context->atoms.ATOM.idx = 0;                                        \
    context->atoms.ATOM.check = check_##CHK;                            \
    context->atoms.ATOM.dirty = GL_FALSE;                               \
    context->atoms.ATOM.emit = (EMIT);                                  \
    context->radeon.hw.max_state_size += (SZ);                          \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
} while (0)

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 23, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}