/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

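/*
 * Each enabled texture unit is programmed with one SET_RESOURCE packet:
 * a PACKET3 header, the resource offset (seven dwords per resource,
 * hence i * 7), and the seven SQ_TEX_RESOURCE words.  The two
 * relocations let the kernel patch the buffer's base address (WORD2)
 * and mipmap address (WORD3) once the buffer object is placed.  A
 * sketch of the command stream for unit 0 (illustrative layout, not
 * an authoritative dump):
 *
 *   PKT3(R600_IT_SET_RESOURCE, 7)
 *   0                               -- resource index 0 * 7
 *   SQ_TEX_RESOURCE0..6             -- format/size/address words
 *   <reloc patching WORD2>  <reloc patching WORD3>
 */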
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            if (!t->image_override)
                bo = t->mt->bo;
            else
                bo = t->bo;
            if (bo) {
                r700SyncSurf(context, bo,
                             RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                             0, TC_ACTION_ENA_bit);

                BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                R600_OUT_BATCH(i * 7);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                     bo,
                                     0,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                     bo,
                                     r700->textures[i]->SQ_TEX_RESOURCE3,
                                     RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            BEGIN_BATCH_NO_AUTOSTATE(5);
            R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
            R600_OUT_BATCH(i * 3);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
            R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t) {
            BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
            R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
            R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SetupVTXConstants(GLcontext * ctx,
                                  unsigned int nStreamID,
                                  void * pAos,
                                  unsigned int size,   /* number of elements in vector */
                                  unsigned int stride,
                                  unsigned int count)  /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

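    /*
     * RV610/RV620/RS780/RS880/RV710 flush the texture cache here rather
     * than the vertex cache; those families are commonly described as
     * lacking a dedicated vertex cache, so vertex fetches go through TC.
     * (The family list comes from the checks below; the "no vertex
     * cache" rationale is an assumption, not documented in this file.)
     */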
    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1; /* buffer size in bytes, minus one */

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO: handle high base address bits */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace the original API data type back instead of assuming GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

void r700SetupStreams(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vpc
        = (struct r700_vertex_program *)ctx->VertexProgram._Current;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    unsigned int i, j = 0;

    R600_STATECHANGE(context, vtx);

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vpc->mesa_program.Base.InputsRead & (1 << i)) {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;
}

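/*
 * Vertex streams are handled in two passes: r700SetupStreams() above
 * copies each attribute the vertex program reads into an aos buffer,
 * and r700SendVTXState() below binds those buffers as vertex fetch
 * resources.  Both loops walk InputsRead in the same order, so stream
 * j here corresponds to aos[j] filled above.  Rough per-draw shape of
 * the emitted stream (illustrative, not a literal dump):
 *
 *   SET_CTL_CONST  SQ_VTX_BASE_VTX_LOC   = 0
 *   SET_CTL_CONST  SQ_VTX_START_INST_LOC = 0
 *   SET_RESOURCE   per stream: base/size/stride/format words plus a
 *                  relocation for the aos buffer object
 */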
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vpc
        = (struct r700_vertex_program *)ctx->VertexProgram._Current;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);

    if (context->radeon.tcl.aos_count == 0)
        return;

    /* base vertex and start instance are always zero here */
    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vpc->mesa_program.Base.InputsRead & (1 << i)) {
            /* currently the aos are packed, one per enabled attribute */
            r700SetupVTXConstants(ctx,
                                  i,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  (unsigned int)context->radeon.tcl.aos[j].components,
                                  (unsigned int)context->radeon.tcl.aos[j].stride * 4, /* stride is in dwords */
                                  (unsigned int)context->radeon.tcl.aos[j].count);
            j++;
        }
    }
}

static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    /* chips after R600 and before RV770 need a SURFACE_BASE_UPDATE
     * after the depth base is reprogrammed; bit 0 selects the depth block */
    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    if (id >= R700_MAX_RENDER_TARGETS) /* valid ids are 0..R700_MAX_RENDER_TARGETS - 1 */
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    /* see r700SendDepthTargetState(): bits 1 and up select the color blocks */
    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to
     * keep the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;

    if (id >= R700_MAX_VIEWPORTS) /* valid ids are 0..R700_MAX_VIEWPORTS - 1 */
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(23);
    R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* the assembler maps constants from the very beginning of the constant file */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* the assembler maps constants from the very beginning of the constant file */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

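/*
 * Each atom's check callback returns the number of dwords the matching
 * emit callback will write for the current state, or 0 if the atom can
 * be skipped; the common radeon code uses the result to size the command
 * buffer before emission.  check_always() simply reserves the fixed
 * cmd_size the atom was registered with, while the others count only the
 * packets that will actually be emitted (e.g. per enabled clip plane,
 * texture unit, or render target).
 */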
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        radeonTexObj *t = r700->textures[i];
        if (t)
            count++;
    }
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;

    return count;
}

#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

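/*
 * For reference, ALLOC_STATE(sq, always, 34, r700SendSQConfig) below
 * expands roughly to the following (sketch, not a literal expansion):
 *
 *   context->atoms.sq.cmd_size = 34;            // worst-case dword count
 *   context->atoms.sq.name     = "sq";          // from #ATOM
 *   context->atoms.sq.check    = check_always;  // token-pasted check_##CHK
 *   context->atoms.sq.emit     = r700SendSQConfig;
 *   context->radeon.hw.max_state_size += 34;
 *   insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.sq);
 *
 * so every atom registered here contributes its worst-case size to
 * max_state_size and joins the list walked at emit time.
 */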
void r600InitAtoms(context_t *context)
{
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 23, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}