Merge commit 'origin/mesa_7_7_branch'
[mesa.git] / src / mesa / drivers / dri / r600 / r700_chip.c
/*
 * Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

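/* Emit SQ_TEX_RESOURCE state for every enabled texture unit. Each unit
 * takes one SET_RESOURCE packet (9 dwords) plus two relocations against
 * the texture BO, one for the base address word (SQ_TEX_RESOURCE2) and
 * one for the mip address word (SQ_TEX_RESOURCE3); the relocations
 * account for the extra 4 dwords in BEGIN_BATCH_NO_AUTOSTATE(9 + 4).
 */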
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            uint32_t offset;
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                    offset = get_base_teximage_offset(t);
                } else {
                    bo = t->bo;
                    offset = 0;
                }
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                    R600_OUT_BATCH(i * 7);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         offset,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

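/* Emit the per-unit SQ_TEX_SAMPLER words (filter, wrap and LOD state)
 * with one SET_SAMPLER packet per enabled texture unit.
 */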
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

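/* Each sampler owns four border color registers (red, green, blue,
 * alpha), 16 bytes apart per unit, hence the (i * 16) register offset.
 */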
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

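/* Set up one SQ_VTX_CONSTANT fetch resource for a vertex stream. The
 * buffer size is derived from the element count and either the packed
 * element size or the explicit stride. RV610/RV620/RS780/RS880/RV710
 * have no vertex cache and fetch vertices through the texture cache,
 * hence the TC_ACTION_ENA flush on those families instead of
 * VC_ACTION_ENA.
 */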
extern int getTypeSize(GLenum type);
static void r700SetupVTXConstants(GLcontext * ctx,
                                  void * pAos,
                                  StreamDesc * pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride)
    {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }
    else
    {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace the original data type back through the API, not only GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize)
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed)
    {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

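/* Reset the base vertex and start instance control constants, then emit
 * one fetch resource for each vertex attribute the current vertex
 * program actually reads.
 */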
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i))
        {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

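/* Fill in the CB_COLOR0_* state for render target <id> from the current
 * color renderbuffer. CB_COLOR0_BASE ends up as 0 because the actual
 * base address is supplied by the relocation emitted in
 * r700SendRenderTargetState().
 */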
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch/rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

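/* Fill in the DB_DEPTH_* state from the current depth renderbuffer:
 * 24-bit depth plus stencil for 4 bytes per pixel, 16-bit depth
 * otherwise.
 */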
static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buffer */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch/rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp)
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    else
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */ /* the z buffer size may be much bigger than needed, so use the actual height used */
}

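/* Emit the depth target registers plus a relocation for the depth BO.
 * R6xx parts newer than R600 but older than RV770 additionally require
 * a SURFACE_BASE_UPDATE packet (bit 0 selects the depth surface) after
 * the base address changes.
 */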
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

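/* Emit color target <id>: the base address with its relocation, the
 * optional SURFACE_BASE_UPDATE for RV6xx ((2 << id) selects color
 * surface <id>), then the remaining CB_COLOR0_* registers.
 */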
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id > R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

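/* The next three atoms upload pixel, vertex and fetch shader state:
 * each emits the SQ_PGM_START_* address with a relocation against the
 * shader BO, followed by the matching resource/control registers.
 */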
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + 32*4), 0x0100000F);
    //R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + (SQ_LOOP_CONST_vs<2)), 0x0100000F);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to
     * keep the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id > R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

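/* Begin an occlusion query: zero the result buffer (4 DBs, two qwords
 * each), reserve command buffer space for the query BO, then emit a
 * ZPASS_DONE event so the hardware writes the zpass counts into the
 * buffer.
 */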
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(ZPASS_DONE);
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

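/* The check_* callbacks report how many dwords their atom will emit on
 * the next flush; returning 0 lets the command buffer code skip the
 * atom entirely.
 */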
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

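/* Register a state atom: record its maximum command size (SZ, in
 * dwords), hook up its check/emit callbacks, and append it to the
 * context's atom list so it gets (re)emitted on state validation.
 */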
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

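/* Build the full atom list for a new context. Each size passed here
 * should cover what the corresponding emit function writes (and what
 * its check_* callback reports), since max_state_size is used to
 * reserve command buffer space before emission.
 */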
void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Set up the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 21, r700SendVSState);
    ALLOC_STATE(ps, always, 24, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}