/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

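/* A sketch of what r700SendTextureState() emits per enabled texture unit;
 * the dword counts are inferred from the BEGIN_BATCH_NO_AUTOSTATE() sizes
 * below, which imply that each R600_OUT_BATCH_RELOC expands to two dwords:
 *
 *   SET_RESOURCE:  PKT3 header + offset + SQ_TEX_RESOURCE0..6  =  9 dwords
 *                  + relocs for the base (RESOURCE2) and mip
 *                    (RESOURCE3) addresses                     =  4 dwords
 *   SET_SAMPLER:   PKT3 header + offset + SQ_TEX_SAMPLER0..2   =  5 dwords
 *   border color:  REGSEQ header + RGBA                        =  6 dwords
 */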
GLboolean r700SendTextureState(context_t *context)
{
    unsigned int i;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    BATCH_LOCALS(&context->radeon);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            if (!t->image_override)
                bo = t->mt->bo;
            else
                bo = t->bo;
            if (bo) {

                r700SyncSurf(context, bo,
                             RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                             0, TC_ACTION_ENA_bit);

                BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                R600_OUT_BATCH(i * 7);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                R600_OUT_BATCH(0); /* r700->textures[i]->SQ_TEX_RESOURCE2 */
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                     bo,
                                     0,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                     bo,
                                     r700->textures[i]->SQ_TEX_RESOURCE3,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                END_BATCH();

                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();

                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();

                COMMIT_BATCH();
            }
        }
    }
    return GL_TRUE;
}

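/* r700SetupVTXConstants() programs one SQ vertex-fetch constant so the
 * vertex fetcher can read a stream out of a radeon_aos upload.  A worked
 * example of the WORD1_0 size field computed below, assuming GL_FLOAT data
 * as this code currently hardwires: 100 vec4 attributes give count = 100,
 * size = 4, so WORD1_0 = 100 * (4 * 4) - 1 = 1599 (1600 bytes minus one). */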
void r700SetupVTXConstants(GLcontext  * ctx,
                           unsigned int nStreamID,
                           void       * pAos,
                           unsigned int size,   /* number of elements in vector */
                           unsigned int stride,
                           unsigned int count)  /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO : trace back api for initial data type, not only GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

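/* r700SetupStreams() first zeroes the SQ_VTX_BASE_VTX_LOC and
 * SQ_VTX_START_INST_LOC control constants, then walks every vertex
 * attribute the current vertex program reads: each enabled array is
 * uploaded with rcommon_emit_vector() and a fetch constant is pointed at
 * it via r700SetupVTXConstants() above.  The fetch constant is indexed by
 * the attribute number i, while the aos uploads are packed at index j. */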
int r700SetupStreams(GLcontext * ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);

    struct r700_vertex_program *vpc
           = (struct r700_vertex_program *)ctx->VertexProgram._Current;

    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;

    unsigned int unBit;
    unsigned int i, j = 0;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++)
    {
        unBit = 1 << i;
        if (vpc->mesa_program.Base.InputsRead & unBit)
        {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);

            /* currently aos are packed */
            r700SetupVTXConstants(ctx,
                                  i,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  (unsigned int)context->radeon.tcl.aos[j].components,
                                  (unsigned int)context->radeon.tcl.aos[j].stride * 4,
                                  (unsigned int)context->radeon.tcl.aos[j].count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;

    return R600_FALLBACK_NONE;
}

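/* r700SendDepthTargetState() points the DB at the current depth buffer:
 * DB_DEPTH_SIZE/VIEW and DB_DEPTH_BASE/INFO, with the base address patched
 * through a relocation against the depth renderbuffer's bo.  The R6xx
 * parts between R600 and RV770 additionally get a SURFACE_BASE_UPDATE
 * packet (bit 0 selects the depth surface), as the family check below
 * gates. */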
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

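/* r700SendRenderTargetState() is the color-buffer counterpart: the
 * CB_COLOR0_* registers are contiguous per target, 4 bytes apart (hence
 * the "+ (4 * id)" addressing), the base address is relocated against the
 * color bo, and on the same R6xx range the SURFACE_BASE_UPDATE bit for
 * color target id is (2 << id). */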
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    /* valid target ids are 0..R700_MAX_RENDER_TARGETS-1 */
    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

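/* The three shader-state emitters below share a pattern: flush the shader
 * cache (SH_ACTION_ENA) for the program bo, emit the relocated
 * SQ_PGM_START_* address, then the plain resource/export/CF-offset
 * registers, and finally clear the stage's dirty flag. */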
GLboolean r700SendPSState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return GL_FALSE;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();

    r700->ps.dirty = GL_FALSE;

    return GL_TRUE;
}

GLboolean r700SendVSState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return GL_FALSE;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();

    r700->vs.dirty = GL_FALSE;

    return GL_TRUE;
}

GLboolean r700SendFSState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return GL_FALSE;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();

    r700->fs.dirty = GL_FALSE;

    return GL_TRUE;
}

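/* r700SendViewportState() emits one viewport's scissor, depth range, and
 * scale/offset state.  The per-viewport register strides used below appear
 * to be byte offsets: 2 scissor registers = 8 bytes, 2 zmin/zmax registers
 * = 8 bytes, 6 scale/offset registers = 24 bytes.  (An observation from
 * the arithmetic here, not from documentation.) */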
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;

    /* valid viewport ids are 0..R700_MAX_VIEWPORTS-1 */
    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

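/* r700SendSQConfig() programs the sequencer: SQ_CONFIG plus the GPR,
 * thread, and stack partitioning between the shader stages, a few
 * auxiliary registers (TA_CNTL_AUX, VC_ENHANCE, DB_DEBUG, DB_WATERMARKS),
 * and the nine ring item sizes in one REGSEQ run. */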
static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

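/* r700SendUCPState() uploads each enabled user clip plane as four dwords;
 * the planes sit 16 bytes (four registers) apart, hence
 * PA_CL_UCP_0_X + (16 * i). */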
static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

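/* r700SendSPIState() writes the shader-interconnect tables in one batch:
 * 32 SQ_VTX_SEMANTIC slots, 10 SPI_VS_OUT_ID words, 9 SPI control
 * registers, and R700_MAX_SHADER_EXPORTS per-input PS controls.  The
 * batch size works out to (2+32) + (2+10) + (2+9) + (2+exports)
 * = 59 + exports dwords, matching BEGIN_BATCH_NO_AUTOSTATE() below. */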
static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(27);
    R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

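/* r700SendCBState(): the CB_CLEAR and CB_FOG registers and the single
 * global CB_BLEND_CONTROL only exist on pre-RV770 parts, while the per-MRT
 * CB_BLEND0_CONTROL registers only exist after R600 (R600 has no per-MRT
 * blend), hence the two family checks bracketing this function. */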
static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(14);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        /* R600 does not have per-MRT blend */
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);

    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);

    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(19);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);

    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

// XXX need to split this up
static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(47);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 13);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_EDGERULE.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

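/* ALLOC_STATE wires one emit function into the radeon state-atom list.
 * SZ is the atom's worst-case command size in dwords; check_always makes
 * the check callback report that full size unconditionally, and the sizes
 * accumulate into max_state_size so the command buffer can be sized up
 * front.  A hypothetical expansion for illustration:
 *   ALLOC_STATE(sx, 9, r700SendSXState)
 * registers an atom named "sx" whose callback writes at most 9 dwords
 * (the three 3-dword REGVAL writes in r700SendSXState above). */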
#define ALLOC_STATE( ATOM, SZ, EMIT )                                       \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_always;                               \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

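/* The sizes below mirror each emitter's BEGIN_BATCH_NO_AUTOSTATE() counts
 * (sq = 34, sc = 47, and so on).  Looping atoms are sized for the worst
 * case: ucp = 36 implies R700_MAX_UCP is 6 at 6 dwords per enabled plane.
 * Atoms with conditional packets (db_target, cb_target) appear to include
 * headroom beyond what is always emitted. */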
void r600InitAtoms(context_t *context)
{
    /* FIXME: rough estimate for "large" const and shader state */
    context->radeon.hw.max_state_size = 7500;

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, 34, r700SendSQConfig);
    ALLOC_STATE(db, 27, r700SendDBState);
    ALLOC_STATE(db_target, 19, r700SendDepthTargetState);
    ALLOC_STATE(sc, 47, r700SendSCState);
    ALLOC_STATE(cl, 18, r700SendCLState);
    ALLOC_STATE(ucp, 36, r700SendUCPState);
    ALLOC_STATE(su, 19, r700SendSUState);
    ALLOC_STATE(cb, 39, r700SendCBState);
    ALLOC_STATE(cb_target, 32, r700SendRenderTargetState);
    ALLOC_STATE(sx, 9, r700SendSXState);
    ALLOC_STATE(vgt, 41, r700SendVGTState);
    ALLOC_STATE(spi, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, 16, r700SendViewportState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}