e67e544d5384138ba12abe86d7bddaec67490015
[mesa.git] / src / mesa / drivers / dri / r600 / r700_chip.c
1 /*
2 * Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
19 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 */
21
22 /*
23 * Authors:
24 * Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
25 * CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
26 */
27
28 #include "main/imports.h"
29 #include "main/glheader.h"
30
31 #include "r600_context.h"
32 #include "r600_cmdbuf.h"
33
34 #include "r700_state.h"
35 #include "r600_tex.h"
36 #include "r700_oglprog.h"
37 #include "r700_fragprog.h"
38 #include "r700_vertprog.h"
39 #include "r700_ioctl.h"
40
41 #include "radeon_mipmap_tree.h"
42
/* Emit texture state for every active texture unit: the 7-dword
 * SET_RESOURCE descriptor (plus relocations for the texture BO), the
 * 3-dword SET_SAMPLER words, and the four border color registers.
 * Always returns GL_TRUE.
 */
GLboolean r700SendTextureState(context_t *context)
{
    unsigned int i;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    BATCH_LOCALS(&context->radeon);

    for (i=0; i<R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            /* An overridden image carries its own BO; otherwise the
             * texture's data lives in the miptree BO. */
            if (!t->image_override)
                bo = t->mt->bo;
            else
                bo = t->bo;
            if (bo) {

                /* Flush the texture cache before the GPU samples from this BO. */
                r700SyncSurf(context, bo,
                             RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                             0, TC_ACTION_ENA_bit);

                /* 9 dwords of SET_RESOURCE packet + 2 dwords per relocation. */
                BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                R600_OUT_BATCH(i * 7); /* 7 resource dwords per unit */
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                R600_OUT_BATCH(0); /* r700->textures[i]->SQ_TEX_RESOURCE2 */
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                /* Relocations for the base (RESOURCE2) and mip (RESOURCE3)
                 * address words, patched by the kernel at submit time. */
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                     bo,
                                     0,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                     bo,
                                     r700->textures[i]->SQ_TEX_RESOURCE3,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                END_BATCH();

                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3); /* 3 sampler dwords per unit */
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();

                /* Border color: 4 consecutive registers, spaced 16 bytes
                 * per sampler. */
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();

                COMMIT_BATCH();
            }
        }
    }
    return GL_TRUE;
}
105
/* Program the SQ vertex-fetch constant for stream nStreamID so the vertex
 * fetcher reads from the AOS buffer described by pAos.
 *
 * pAos   - actually a struct radeon_aos *, carrying the BO and offset
 * size   - number of elements per vector
 * stride - byte stride between consecutive vectors
 * count  - number of vectors in the stream
 *
 * Returns early (emitting nothing) when the AOS has no backing BO.
 */
void r700SetupVTXConstants(GLcontext * ctx,
                           unsigned int nStreamID,
                           void * pAos,
                           unsigned int size, /* number of elements in vector */
                           unsigned int stride,
                           unsigned int count) /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    /* RV610/RV620/RS780/RV710 fetch vertices through the texture cache
     * (TC); the remaining families use the vertex cache (VC). */
    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    /* WORD0 = base address (low 32 bits), WORD1 = buffer size in bytes - 1
     * (each element is 4 bytes, GL_FLOAT). */
    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO : trace back api for initial data type, not only GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    /* 9 dwords of SET_RESOURCE packet + 2 dwords for the relocation. */
    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    /* Relocation patches WORD0 (base address) against the AOS BO. */
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();

}
170
/* Set up the vertex streams for the current draw: zero the base-vertex and
 * start-instance CTL constants, then, for each vertex attribute read by the
 * active vertex program, upload the attribute array and point a vertex
 * fetch constant at it.  Returns R600_FALLBACK_NONE.
 */
int r700SetupStreams(GLcontext * ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);

    struct r700_vertex_program *vpc
           = (struct r700_vertex_program *)ctx->VertexProgram._Current;

    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;

    unsigned int unBit;
    unsigned int i, j = 0;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    /* Fetch constants are indexed by attribute number i, while the aos[]
     * array is packed densely by j (only attributes actually read). */
    for(i=0; i<VERT_ATTRIB_MAX; i++)
    {
        unBit = 1 << i;
        if(vpc->mesa_program.Base.InputsRead & unBit)
        {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);

            /* currently aos are packed */
            r700SetupVTXConstants(ctx,
                                  i,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  (unsigned int)context->radeon.tcl.aos[j].components,
                                  (unsigned int)context->radeon.tcl.aos[j].stride * 4,
                                  (unsigned int)context->radeon.tcl.aos[j].count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;

    return R600_FALLBACK_NONE;
}
222
/* Emit depth buffer state: size/view and base/info registers (with a
 * relocation against the depth renderbuffer BO), an extra
 * SURFACE_BASE_UPDATE on the R6xx parts that require it, and finally a
 * DB cache flush.  Returns GL_FALSE when no depth renderbuffer is bound.
 */
GLboolean r700SendDepthTargetState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return GL_FALSE;
    }

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    /* R6xx chips after R600 but before RV770 need SURFACE_BASE_UPDATE
     * (bit 0 = depth surface) after the depth base changes. */
    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();

    /* Flush DB caches so previous rendering lands before reuse. */
    r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                 DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    return GL_TRUE;
}
263
264 GLboolean r700SendRenderTargetState(context_t *context, int id)
265 {
266 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
267 struct radeon_renderbuffer *rrb;
268 BATCH_LOCALS(&context->radeon);
269
270 rrb = radeon_get_colorbuffer(&context->radeon);
271 if (!rrb || !rrb->bo) {
272 fprintf(stderr, "no rrb\n");
273 return GL_FALSE;
274 }
275
276 if (id > R700_MAX_RENDER_TARGETS)
277 return GL_FALSE;
278
279 if (!r700->render_target[id].enabled)
280 return GL_FALSE;
281
282 BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
283 R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
284 R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
285 R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
286 rrb->bo,
287 r700->render_target[id].CB_COLOR0_BASE.u32All,
288 0, RADEON_GEM_DOMAIN_VRAM, 0);
289 END_BATCH();
290
291 if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
292 (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
293 BEGIN_BATCH_NO_AUTOSTATE(2);
294 R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
295 R600_OUT_BATCH((2 << id));
296 END_BATCH();
297 }
298
299 BEGIN_BATCH_NO_AUTOSTATE(18);
300 R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
301 R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
302 R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
303 R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
304 R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
305 R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
306 END_BATCH();
307
308 COMMIT_BATCH();
309
310 r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
311 CB_ACTION_ENA_bit | (1 << (id + 6)));
312
313 return GL_TRUE;
314 }
315
316 GLboolean r700SendPSState(context_t *context)
317 {
318 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
319 struct radeon_bo * pbo;
320 BATCH_LOCALS(&context->radeon);
321
322 pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));
323
324 if (!pbo)
325 return GL_FALSE;
326
327 r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
328
329 BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
330 R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
331 R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
332 R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
333 pbo,
334 r700->ps.SQ_PGM_START_PS.u32All,
335 RADEON_GEM_DOMAIN_GTT, 0, 0);
336 END_BATCH();
337
338 BEGIN_BATCH_NO_AUTOSTATE(9);
339 R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
340 R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
341 R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
342 END_BATCH();
343
344 COMMIT_BATCH();
345
346 return GL_TRUE;
347 }
348
349 GLboolean r700SendVSState(context_t *context)
350 {
351 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
352 struct radeon_bo * pbo;
353 BATCH_LOCALS(&context->radeon);
354
355 pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
356
357 if (!pbo)
358 return GL_FALSE;
359
360 r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
361
362 BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
363 R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
364 R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
365 R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
366 pbo,
367 r700->vs.SQ_PGM_START_VS.u32All,
368 RADEON_GEM_DOMAIN_GTT, 0, 0);
369 END_BATCH();
370
371 BEGIN_BATCH_NO_AUTOSTATE(6);
372 R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
373 R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
374 END_BATCH();
375
376 COMMIT_BATCH();
377
378 return GL_TRUE;
379 }
380
381 GLboolean r700SendFSState(context_t *context)
382 {
383 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
384 struct radeon_bo * pbo;
385 BATCH_LOCALS(&context->radeon);
386
387 /* XXX fixme
388 * R6xx chips require a FS be emitted, even if it's not used.
389 * since we aren't using FS yet, just send the VS address to make
390 * the kernel command checker happy
391 */
392 pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
393 r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
394 r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
395 r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
396 /* XXX */
397
398 if (!pbo)
399 return GL_FALSE;
400
401 r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);
402
403 BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
404 R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
405 R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
406 R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
407 pbo,
408 r700->fs.SQ_PGM_START_FS.u32All,
409 RADEON_GEM_DOMAIN_GTT, 0, 0);
410 END_BATCH();
411
412 BEGIN_BATCH_NO_AUTOSTATE(6);
413 R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
414 R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
415 END_BATCH();
416
417 COMMIT_BATCH();
418
419 return GL_TRUE;
420 }
421
422 GLboolean r700SendViewportState(context_t *context, int id)
423 {
424 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
425 BATCH_LOCALS(&context->radeon);
426
427 if (id > R700_MAX_VIEWPORTS)
428 return GL_FALSE;
429
430 if (!r700->viewport[id].enabled)
431 return GL_FALSE;
432
433 BEGIN_BATCH_NO_AUTOSTATE(16);
434 R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
435 R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
436 R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
437 R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
438 R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
439 R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
440 R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
441 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
442 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
443 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
444 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
445 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
446 R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
447 END_BATCH();
448
449 COMMIT_BATCH();
450
451 return GL_TRUE;
452 }
453
/* Emit the global SQ (sequencer) configuration: GPR/thread/stack resource
 * partitioning, a few auxiliary control registers, and the ring item
 * sizes.  Always returns GL_TRUE.
 */
GLboolean r700SendSQConfig(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    /* Six consecutive SQ resource-management registers. */
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    /* Nine consecutive ring item-size registers starting at ESGS. */
    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();

    return GL_TRUE;
}
490
491 GLboolean r700SendUCPState(context_t *context)
492 {
493 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
494 BATCH_LOCALS(&context->radeon);
495 int i;
496
497 for (i = 0; i < R700_MAX_UCP; i++) {
498 if (r700->ucp[i].enabled) {
499 BEGIN_BATCH_NO_AUTOSTATE(6);
500 R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
501 R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
502 R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
503 R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
504 R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
505 END_BATCH();
506 COMMIT_BATCH();
507 }
508 }
509
510 return GL_TRUE;
511 }
512
/* Emit SPI (shader pipe interconnect) state: the 32 vertex semantic
 * mappings, the 10 VS output id registers, the SPI output/interpolation
 * control block, and the per-export PS input controls.  Always returns
 * GL_TRUE.
 */
GLboolean r700SendSPIState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    /* 32 consecutive vertex semantic registers. */
    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    /* 10 consecutive VS output id registers. */
    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    /* VS output config, thread grouping, PS input/interpolation control,
     * Z input and fog registers (9 consecutive). */
    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    /* One PS input control per possible shader export. */
    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for(ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
587
/* Emit VGT (vertex grouper/tessellator) state: index limits and offsets,
 * the output path / HOS / grouping control block, primitive-id and
 * instance-rate registers, and stream-out enables.  Always returns
 * GL_TRUE.
 */
GLboolean r700SendVGTState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    /* Index clamp/offset registers (4 consecutive). */
    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    /* Output path / HOS / group control block (13 consecutive). */
    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    /* Stream-out / reuse / vertex count enables (3 consecutive). */
    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
633
634 GLboolean r700SendSXState(context_t *context)
635 {
636 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
637 BATCH_LOCALS(&context->radeon);
638
639 BEGIN_BATCH_NO_AUTOSTATE(9);
640 R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
641 R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
642 R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
643 END_BATCH();
644 COMMIT_BATCH();
645
646 return GL_TRUE;
647 }
648
649 GLboolean r700SendDBState(context_t *context)
650 {
651 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
652 BATCH_LOCALS(&context->radeon);
653
654 BEGIN_BATCH_NO_AUTOSTATE(27);
655 R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);
656
657 R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
658 R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
659 R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);
660
661 R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
662 R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
663 R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
664
665 R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
666 R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);
667
668 R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
669 R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
670 R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);
671
672 R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
673 R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);
674
675 END_BATCH();
676 COMMIT_BATCH();
677
678 return GL_TRUE;
679 }
680
/* Emit CB (color block) state: R6xx-only clear/fog color and global blend
 * control, then the target/shader masks, blend constant color, shader and
 * color control, clear-compare registers, and — on chips newer than R600 —
 * per-render-target blend controls.  Always returns GL_TRUE.
 */
GLboolean r700SendCBState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    /* These registers only exist on pre-RV770 (R6xx) parts. */
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(14);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        /* R600 does not have per-MRT blend */
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);

    /* Blend constant color (4 consecutive registers). */
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);

    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);

    /* Clear-compare control/src/dst/mask (4 consecutive registers). */
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();

    /* Per-MRT blend control exists only on chips after R600. */
    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();

    return GL_TRUE;
}
739
740 GLboolean r700SendSUState(context_t *context)
741 {
742 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
743 BATCH_LOCALS(&context->radeon);
744
745 BEGIN_BATCH_NO_AUTOSTATE(19);
746 R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
747
748 R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
749 R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
750 R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
751 R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
752 R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
753
754 R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
755 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
756 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
757
758 R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
759 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
760 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
761 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
762 R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
763
764 END_BATCH();
765 COMMIT_BATCH();
766
767 return GL_TRUE;
768 }
769
770 GLboolean r700SendCLState(context_t *context)
771 {
772 R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
773 BATCH_LOCALS(&context->radeon);
774
775 BEGIN_BATCH_NO_AUTOSTATE(18);
776 R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
777 R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
778 R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
779 R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
780
781 R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
782 R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
783 R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
784 R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
785 R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
786
787 END_BATCH();
788 COMMIT_BATCH();
789
790 return GL_TRUE;
791 }
792
// XXX need to split this up
/* Emit SC (scan converter) state: screen/window/generic scissors, the
 * window offset and clip rectangles, and the line/multipass/AA control
 * registers.  Always returns GL_TRUE.
 */
GLboolean r700SendSCState(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(47);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    /* Window offset, window scissor, clip-rect rule, the four clip
     * rectangles, and the edge rule (13 consecutive registers). */
    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 13);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_EDGERULE.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}