mesa.git: src/mesa/drivers/dri/r600/r700_chip.c (commit: Merge branch 'mesa_7_7_branch')
/*
 * Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

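/*
 * Each r700Send*State() function below emits one hardware state atom into
 * the command stream.  The pattern is always the same:
 * BEGIN_BATCH_NO_AUTOSTATE() reserves the exact number of dwords,
 * R600_OUT_BATCH()/R600_OUT_BATCH_REGSEQ()/R600_OUT_BATCH_REGVAL() write
 * packets or register values, R600_OUT_BATCH_RELOC() adds a buffer-object
 * relocation, and END_BATCH()/COMMIT_BATCH() close out the batch.  The
 * check_*() callbacks near the end of the file report how many dwords each
 * atom will emit so that batch space can be reserved up front.
 */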
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct r700_vertex_program *vp = context->selected_vp;

    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                } else {
                    bo = t->bo;
                }
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));

                    if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                        /* vs texture */
                        R600_OUT_BATCH((i + VERT_ATTRIB_MAX + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
                    } else {
                        R600_OUT_BATCH(i * 7);
                    }

                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE2,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

#define SAMPLER_STRIDE 3

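/*
 * Each texture sampler occupies SAMPLER_STRIDE (3) sampler words
 * (SQ_TEX_SAMPLER0..2), so the SET_SAMPLER packet below addresses sampler i
 * at offset i * 3, or (i + SQ_TEX_SAMPLER_VS_OFFSET) * 3 for vertex-shader
 * textures.
 */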
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;

    struct r700_vertex_program *vp = context->selected_vp;

    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));

                if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                    /* vs texture */
                    R600_OUT_BATCH((i + SQ_TEX_SAMPLER_VS_OFFSET) * SAMPLER_STRIDE);
                } else {
                    R600_OUT_BATCH(i * 3);
                }

                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

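/*
 * r700SetupVTXConstants() builds one SQ vertex fetch constant (an 8-dword
 * SET_RESOURCE packet) per enabled vertex stream.  WORD0 is the buffer
 * offset and WORD1 the buffer size in bytes minus one.  For example, a
 * tightly packed stream (stride 0) of 100 vec3 GL_FLOAT attributes gives
 * nVBsize = 100 * 3 * 4 = 1200, so WORD1 = 1199.
 */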
extern int getTypeSize(GLenum type);
static void r700SetupVTXConstants(GLcontext *ctx,
                                  void *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride) {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    } else {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back the API for the initial data type, not only GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize) {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed) {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

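/*
 * SQ_VTX_BASE_VTX_LOC and SQ_VTX_START_INST_LOC are control constants; both
 * are reset to 0 via SET_CTL_CONST before the per-attribute fetch constants
 * are emitted (no base-vertex or start-instance offset is applied here).
 */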
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

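/*
 * CB_COLOR0_SIZE packs the surface dimensions as tile counts: PITCH_TILE_MAX
 * is the pitch in 8-pixel units minus one, and SLICE_TILE_MAX the total
 * pixel count in 64-pixel units minus one.  For example, a 1024-pixel-wide,
 * 768-line framebuffer gives PITCH_TILE_MAX = 1024/8 - 1 = 127 and
 * SLICE_TILE_MAX = (1024*768)/64 - 1 = 12287.
 */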
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp) {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    } else {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp) {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    } else {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than needed, so use the actual height drawn. */
}

static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id > R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

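/*
 * The three shader stages below follow the same emission pattern: sync the
 * shader buffer object for instruction fetch (SH_ACTION_ENA), write
 * SQ_PGM_START_* with a relocation against the shader bo, then write the
 * stage's resource and control-flow offset registers as plain register
 * values.
 */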
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + 32*4), 0x0100000F);
    //R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + (SQ_LOOP_CONST_vs << 2)), 0x0100000F);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id > R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

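/*
 * ALU constants are emitted as a single SET_ALU_CONST packet carrying four
 * dwords (one float4 vector) per constant.  The dword after the packet
 * header is the start offset in dword units, so PS constants begin at
 * SQ_ALU_CONSTANT_PS_OFFSET * 4 and VS constants at
 * SQ_ALU_CONSTANT_VS_OFFSET * 4.
 */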
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

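/*
 * Occlusion query begin: the query buffer is cleared, then a ZPASS_DONE
 * event is written so the hardware dumps zpass counts into the buffer.
 * Each of the 4 DBs writes a pair of qwords, hence the
 * 4 * 2 * sizeof(uint64_t) clear below.
 */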
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(ZPASS_DONE);
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

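/*
 * check_*() callbacks: each returns an upper bound on the number of dwords
 * its atom will emit for the current state; returning 0 skips the atom.
 * These counts must cover the BEGIN_BATCH_NO_AUTOSTATE() reservations made
 * by the corresponding emit functions above.
 */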
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget() but the state size
           is calculated before that.  Until MRTs are done, hardcode
           target 0 as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

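/*
 * ALLOC_STATE(NAME, CHK, SZ, EMIT) wires up one state atom: it records the
 * worst-case command size, binds check_CHK as the size callback and EMIT as
 * the emit callback, and appends the atom to the context's atom list.  For
 * illustration, ALLOC_STATE(sq, always, 34, r700SendSQConfig) expands
 * roughly to:
 *
 *   context->atoms.sq.cmd_size = 34;
 *   context->atoms.sq.check    = check_always;
 *   context->atoms.sq.emit     = r700SendSQConfig;
 *   context->radeon.hw.max_state_size += 34;
 *   insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.sq);
 */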
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 21, r700SendVSState);
    ALLOC_STATE(ps, always, 24, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}