r600: fix state size prediction after dc0777d3
mesa.git: src/mesa/drivers/dri/r600/r700_chip.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

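/* Emit the SQ_TEX_RESOURCE words for every enabled texture unit.  The
 * batch size below is 9 dwords for the SET_RESOURCE packet (2 header
 * dwords plus the 7 resource words) and 4 dwords for the two relocations
 * that patch the base addresses held in SQ_TEX_RESOURCE2/3.
 */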
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override)
                    bo = t->mt->bo;
                else
                    bo = t->bo;
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                    R600_OUT_BATCH(i * 7);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         0,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

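/* Emit the three SQ_TEX_SAMPLER words for each enabled texture unit: a
 * 2-dword SET_SAMPLER header plus 3 data dwords, i.e. 5 dwords per unit.
 */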
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

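/* Program one SQ vertex fetch constant for a stream.  Chips without a
 * vertex cache (RV610/RV620/RS780/RS880/RV710) fetch vertex data through
 * the texture cache, which is why the surface sync below flushes TC
 * instead of VC on those parts.
 */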
static void r700SetupVTXConstants(GLcontext * ctx,
                                  unsigned int nStreamID,
                                  void * pAos,
                                  unsigned int size,   /* number of elements in vector */
                                  unsigned int stride,
                                  unsigned int count)  /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back through the API for the original data type instead of assuming GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

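/* Walk the vertex program's InputsRead mask and emit an array-of-structs
 * (AOS) vector for each attribute the program actually reads; the AOS
 * entries are packed, so stream j corresponds to the j-th set bit.
 */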
void r700SetupStreams(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    unsigned int i, j = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    R600_STATECHANGE(context, vtx);

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;
}

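/* Emit the vertex fetch state: two SET_CTL_CONST packets (3 dwords each)
 * that zero SQ_VTX_BASE_VTX_LOC and SQ_VTX_START_INST_LOC, followed by
 * one fetch constant per active AOS stream via r700SetupVTXConstants().
 */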
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            /* currently aos are packed */
            r700SetupVTXConstants(ctx,
                                  i,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  (unsigned int)context->radeon.tcl.aos[j].components,
                                  (unsigned int)context->radeon.tcl.aos[j].stride * 4,
                                  (unsigned int)context->radeon.tcl.aos[j].count);
            j++;
        }
    }
}

static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buffer */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp)
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    else
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than actually needed, so use the height actually used. */
}

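/* Emit the depth target registers.  On RV6xx-class parts (newer than R600
 * but older than RV770) a SURFACE_BASE_UPDATE packet is also needed after
 * the depth base address is written.
 */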
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

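/* Emit the pixel shader program: sync the shader buffer object, write
 * SQ_PGM_START_PS with a relocation against the shader bo, then the
 * resource/export/CF-offset registers.
 */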
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

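/* The check_* callbacks below implement the state size prediction this
 * commit fixes: each one returns an upper bound on the number of dwords
 * its atom's emit function may add to the command buffer (0 means the
 * atom can be skipped), so enough space can be reserved before emission.
 */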
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget, but the state size
           is calculated before that runs.  Until MRTs are done, hardcode
           target 0 as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

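/* Note: the 31 dwords per enabled unit returned below is shared by all
 * three texture atoms (resources, samplers, border colors), so each of
 * them individually over-reserves; presumably a deliberately conservative
 * bound.
 */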
static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

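/* Register a state atom: record its worst-case size SZ (in dwords), hook
 * up its size-check and emit callbacks, grow the context's max_state_size
 * accordingly, and append it to the atom list.
 */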
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}