/* mesa.git: src/mesa/drivers/dri/r600/r700_chip.c */
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

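/*
 * r700SendTexState: emit SQ_TEX_RESOURCE state for every enabled texture
 * unit.  Each unit gets one SET_RESOURCE packet (a slot index plus the seven
 * SQ_TEX_RESOURCE0..6 dwords) followed by two relocations so the kernel can
 * patch the base and mip addresses of the texture BO into RESOURCE2/3.
 * Units sampled by the vertex shader (per unVetTexBits) live in the VS
 * section of the fetch-resource file, hence the SQ_FETCH_RESOURCE_VS_OFFSET
 * slot computation below.
 */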
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct r700_vertex_program *vp = context->selected_vp;

    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            uint32_t offset;
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                    offset = get_base_teximage_offset(t);
                } else {
                    bo = t->bo;
                    offset = 0;
                }
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));

                    if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                        /* vs texture */
                        R600_OUT_BATCH((i + VERT_ATTRIB_MAX + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
                    } else {
                        R600_OUT_BATCH(i * FETCH_RESOURCE_STRIDE);
                    }

                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         offset,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

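/*
 * Each sampler occupies SAMPLER_STRIDE (3) dwords of sampler state:
 * SQ_TEX_SAMPLER0..2.  Pixel-shader samplers start at slot 0; vertex-shader
 * samplers start at SQ_TEX_SAMPLER_VS_OFFSET.
 */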
#define SAMPLER_STRIDE 3

static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;

    struct r700_vertex_program *vp = context->selected_vp;

    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));

                if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                    /* vs sampler */
                    R600_OUT_BATCH((i + SQ_TEX_SAMPLER_VS_OFFSET) * SAMPLER_STRIDE);
                } else {
                    R600_OUT_BATCH(i * SAMPLER_STRIDE);
                }

                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

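/*
 * r700SendTexBorderColorState: each unit's border color is four consecutive
 * registers (RED/GREEN/BLUE/ALPHA), 16 bytes of register space per sampler,
 * hence the (i * 16) offset.
 */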
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

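/*
 * r700SetupVTXConstants: program one SQ_VTX_CONSTANT fetch-resource slot for
 * a vertex stream.  WORD0 is the buffer offset, WORD1 the buffer size minus
 * one, WORD2 packs stride/format/sign, and WORD6 marks the slot as a valid
 * buffer resource.  On RV610/RV620/RS780/RS880/RV710 the code syncs the BO
 * through the texture cache (TC) rather than the vertex cache (VC),
 * presumably because vertex fetches go through the TC on those parts.
 */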
extern int getTypeSize(GLenum type);
static void r700SetupVTXConstants(GLcontext *ctx,
                                  void *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride) {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    } else {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back api for initial data type, not only GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize) {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed) {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

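/*
 * r700SendVTXState: zero the two SQ_VTX control constants (base vertex and
 * start instance), then program one fetch-resource slot per vertex attribute
 * the vertex program actually reads.  stream_desc[j] walks in step with
 * tcl.aos[j], so only enabled attributes consume slots.
 */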
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

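/*
 * r700SetRenderTarget: fill in the CB_COLOR0_* state for render target 'id'
 * from the current color renderbuffer.  Note that CB_COLOR0_BASE ends up
 * zero here; the actual GPU address is supplied through the relocation
 * emitted in r700SendRenderTargetState below.
 */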
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch/rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    /* base address is patched in via the relocation, so zero it here */
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp) {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    } else {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

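/*
 * r700SetDepthTarget: mirror of r700SetRenderTarget for the depth buffer.
 * DEPTH_8_24 is used for 4-byte depth buffers, DEPTH_16 otherwise, and the
 * depth surface is 1D tiled.
 */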
static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch/rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp) {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    } else {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* the z buffer may be much bigger than needed, so use the actual height drawn */
}

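/*
 * r700SendDepthTargetState: emit DB_DEPTH_SIZE/VIEW and DB_DEPTH_BASE/INFO
 * with a relocation on the depth BO.  The R6xx parts between R600 and RV770
 * additionally want a SURFACE_BASE_UPDATE event whenever the depth base
 * changes, which is what the extra packet below provides.
 */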
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0); /* depth */
        END_BATCH();
    }

    COMMIT_BATCH();
}

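/*
 * r700SendRenderTargetState: emit the CB_COLOR0_* registers for target 'id'
 * (currently always 0; MRT support would loop here) with the base-address
 * relocation on the color BO, plus the R6xx SURFACE_BASE_UPDATE event
 * ((2 << id) selects the color target bit).
 */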
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

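/*
 * The shader-stage senders below all follow the same pattern: sync the
 * shader BO through the SH cache, emit SQ_PGM_START_* with a relocation,
 * then the stage's resource/export/CF-offset registers.
 */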
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();
}

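/*
 * r700SendVSState: same pattern for the vertex shader.  The VS loop
 * constants sit 32 slots (32 * 4 bytes of register space) above
 * SQ_LOOP_CONST_0, hence the offset in the final REGVAL.
 */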
static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + 32*4), 0x0100000F);
    /* R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + (SQ_LOOP_CONST_vs << 2)), 0x0100000F); */
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to keep
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

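/*
 * r700SendUCPState: emit PA_CL_UCP_n_X..W (four dwords, 16 bytes of register
 * space per plane) for each enabled user clip plane.
 */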
static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

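/*
 * ALU constants are uploaded as float4 vectors in a single SET_ALU_CONST
 * packet: the destination offset followed by num_consts * 4 payload dwords.
 * PS constants start at SQ_ALU_CONSTANT_PS_OFFSET and VS constants at
 * SQ_ALU_CONSTANT_VS_OFFSET; the assembler allocates from the start of each
 * range.
 */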
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

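/*
 * r700SendQueryBegin: zero the query BO (the hardware writes one pair of
 * qwords per DB) and emit a ZPASS_DONE event so occlusion counting starts
 * at the query's current offset.
 */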
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(ZPASS_DONE);
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

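/*
 * The check_* callbacks below return the number of dwords an atom will emit
 * this time around; the emit path uses them to size the command buffer, so
 * each one has to stay in sync with the BEGIN_BATCH counts in the matching
 * send function above.
 */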
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget, but the state size is
         * calculated before that.  Until MRTs are done, hardcode target 0
         * as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

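/*
 * ALLOC_STATE(name, check, size, emit) registers one state atom: it sizes
 * the atom (in dwords), wires up the check_* callback and emit function,
 * and appends it to the context's atom list.  For example,
 * ALLOC_STATE(sq, always, 34, r700SendSQConfig) reserves 34 dwords and
 * always emits, matching r700SendSQConfig's batch count (34 = 8 + 5*3 + 11).
 */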
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 21, r700SendVSState);
    ALLOC_STATE(ps, always, 24, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}