/* src/mesa/drivers/dri/r600/r700_chip.c */
/*
 * Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

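/*
 * Emit texture resource state.  For each bound texture unit, sync the
 * backing buffer object and emit a SET_RESOURCE packet carrying the seven
 * SQ_TEX_RESOURCE dwords, with relocations on the RESOURCE2/RESOURCE3 words
 * (the base and, presumably, mip-chain addresses) so the kernel can patch
 * in the final GPU offsets.
 */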
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            if (!t->image_override)
                bo = t->mt->bo;
            else
                bo = t->bo;
            if (bo) {
                r700SyncSurf(context, bo,
                             RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                             0, TC_ACTION_ENA_bit);

                BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                R600_OUT_BATCH(i * 7);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                     bo,
                                     0,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                     bo,
                                     r700->textures[i]->SQ_TEX_RESOURCE3,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

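/*
 * Emit per-unit sampler state: a SET_SAMPLER packet with the three
 * SQ_TEX_SAMPLER dwords (filtering, wrap modes, LOD control) for each
 * bound texture unit.
 */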
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            BEGIN_BATCH_NO_AUTOSTATE(5);
            R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
            R600_OUT_BATCH(i * 3);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

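/*
 * Emit the border color for each bound texture unit; the four
 * TD_PS_SAMPLER0_BORDER_{RED,GREEN,BLUE,ALPHA} registers repeat at a
 * 16-byte stride per sampler.
 */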
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
            R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

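/*
 * Program one vertex-fetch constant (resource) for an AOS stream: flush
 * the right cache first (the small RV610/RV620/RS780/RS880/RV710 parts
 * fetch vertices through the texture cache, the rest through the vertex
 * cache), then emit a SET_RESOURCE packet with the base offset, size - 1,
 * stride/format words, and a relocation on the base address.
 */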
static void r700SetupVTXConstants(GLcontext *ctx,
                                  unsigned int nStreamID,
                                  void *pAos,
                                  unsigned int size,   /* number of elements in vector */
                                  unsigned int stride,
                                  unsigned int count)  /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace the original data type back through the API instead of assuming GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

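/*
 * Walk the vertex program's InputsRead mask and emit one AOS stream per
 * referenced attribute through rcommon_emit_vector(), recording how many
 * streams were built for the fetch-constant setup below.
 */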
void r700SetupStreams(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vpc
        = (struct r700_vertex_program *)ctx->VertexProgram._Current;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    unsigned int i, j = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    R600_STATECHANGE(context, vtx);

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vpc->mesa_program.Base.InputsRead & (1 << i)) {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;
}

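/*
 * Emit vertex fetcher state: zero SQ_VTX_BASE_VTX_LOC and
 * SQ_VTX_START_INST_LOC through SET_CTL_CONST, then program one fetch
 * constant per enabled attribute.  Note the stride is converted from
 * dwords to bytes (aos stride * 4).
 */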
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vpc
        = (struct r700_vertex_program *)ctx->VertexProgram._Current;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vpc->mesa_program.Base.InputsRead & (1 << i)) {
            /* currently aos are packed */
            r700SetupVTXConstants(ctx,
                                  i,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  (unsigned int)context->radeon.tcl.aos[j].components,
                                  (unsigned int)context->radeon.tcl.aos[j].stride * 4,
                                  (unsigned int)context->radeon.tcl.aos[j].count);
            j++;
        }
    }
}

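/*
 * Fill in the CB_COLOR0_* state for render target 'id' from the current
 * color renderbuffer: pitch and slice tile maxima, linear array mode, and
 * a format picked from cpp (8888 for 32bpp, 565 otherwise).  The base is
 * cleared to zero here; the actual address appears to be supplied by the
 * relocation emitted in r700SendRenderTargetState().
 */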
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp) {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    } else {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

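/*
 * Fill in the DB_DEPTH_* state from the current depth renderbuffer:
 * pitch/slice tile maxima, DEPTH_8_24 vs DEPTH_16 by cpp, and a 2D tiled
 * array mode.  The base address is left zero and patched via relocation.
 */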
static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buffer */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp) {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    } else {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_2D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than what is actually needed, so use
     * the height actually in use. */
}

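/*
 * Emit the depth target registers with a relocation on DB_DEPTH_BASE.
 * On RV6xx parts (newer than R600 but older than RV770) this is followed
 * by a SURFACE_BASE_UPDATE packet with the depth bit set, which those
 * chips appear to require whenever the depth base changes.
 */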
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

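/*
 * Emit the color target registers: CB_COLOR0_BASE with a relocation,
 * a SURFACE_BASE_UPDATE (color bit for 'id') on RV6xx parts, then the
 * SIZE/VIEW/INFO/TILE/FRAG/MASK values computed in r700SetRenderTarget().
 */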
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

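/*
 * Upload pixel shader state: sync the shader buffer object, emit
 * SQ_PGM_START_PS with a relocation so the kernel fills in the shader's
 * GPU address, then the RESOURCES/EXPORTS/CF_OFFSET registers.  The VS
 * and FS emitters below follow the same pattern.
 */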
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

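/*
 * Emit viewport 'id': scissor TL/BR, depth range (ZMIN/ZMAX), and the six
 * PA_CL_VPORT_{X,Y,Z}{SCALE,OFFSET} transform registers.
 */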
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

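/*
 * Emit the mostly static SQ configuration: the SQ_CONFIG block dividing
 * GPRs, threads, and stack space among the shader stages, a few
 * auxiliary/debug registers, and the ring item sizes.
 */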
static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

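/*
 * Emit one PA_CL_UCP_n_{X,Y,Z,W} vector per enabled user clip plane;
 * each plane's register block sits 16 bytes after the previous one.
 */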
static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

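/*
 * Emit SPI/semantic state: the 32-entry SQ_VTX_SEMANTIC table, the ten
 * SPI_VS_OUT_ID registers, the VS/PS in/out configuration, and one
 * SPI_PS_INPUT_CNTL word per possible shader export.
 */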
static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(23);
    R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* the assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* the assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

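/*
 * The check_* callbacks report how many dwords an atom will emit (zero
 * means skip it).  These totals size the command buffer, so they must
 * stay in sync with what the r700Send* functions above actually write.
 */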
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t)
            count++;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

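/*
 * Fill in and register one state atom: SZ is the worst-case dword count
 * (accumulated into max_state_size for command-buffer sizing), check_##CHK
 * computes the actual size at emit time, and EMIT writes the packets.
 * Atoms are linked onto radeon.hw.atomlist in emission order.
 */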
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
    do {                                                                    \
        context->atoms.ATOM.cmd_size = (SZ);                                \
        context->atoms.ATOM.cmd = NULL;                                     \
        context->atoms.ATOM.name = #ATOM;                                   \
        context->atoms.ATOM.idx = 0;                                        \
        context->atoms.ATOM.check = check_##CHK;                            \
        context->atoms.ATOM.dirty = GL_FALSE;                               \
        context->atoms.ATOM.emit = (EMIT);                                  \
        context->radeon.hw.max_state_size += (SZ);                          \
        insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
    } while (0)

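/*
 * Register every atom with its worst-case size.  For example,
 * ALLOC_STATE(sq, always, 34, r700SendSQConfig) creates an atom named
 * "sq" whose check_always callback always reports its full 34 dwords
 * and whose emitter is r700SendSQConfig.
 */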
void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 23, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}