/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"

#include "radeon_mipmap_tree.h"

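/* Emit SQ_TEX_RESOURCE state for every enabled texture unit. Textures
 * fetched by the vertex shader are placed past the VERT_ATTRIB_MAX
 * vertex-fetch slots in the SQ resource file; pixel-shader textures
 * start at resource slot 0.
 */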
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct r700_vertex_program *vp = context->selected_vp;

    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                } else {
                    bo = t->bo;
                }
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));

                    if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                        /* vs texture */
                        R600_OUT_BATCH((i + VERT_ATTRIB_MAX + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
                    } else {
                        R600_OUT_BATCH(i * FETCH_RESOURCE_STRIDE);
                    }

                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE2,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

#define SAMPLER_STRIDE 3

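/* Emit SQ_TEX_SAMPLER state (SAMPLER_STRIDE dwords per sampler) for every
 * enabled texture unit, again splitting vertex- and pixel-shader samplers
 * by their offset in the sampler file.
 */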
static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;

    struct r700_vertex_program *vp = context->selected_vp;

    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));

                if ((1 << i) & vp->r700AsmCode.unVetTexBits) {
                    /* vs texture */
                    R600_OUT_BATCH((i + SQ_TEX_SAMPLER_VS_OFFSET) * SAMPLER_STRIDE);
                } else {
                    R600_OUT_BATCH(i * SAMPLER_STRIDE);
                }

                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

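/* Emit the four TD_PS_SAMPLER border-color registers for each enabled
 * texture unit; each unit's register block is 16 bytes apart.
 */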
static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

extern int getTypeSize(GLenum type);
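/* Build and emit one SQ_VTX_CONSTANT resource descriptor for a vertex
 * stream: base offset, buffer size, stride, data format and number format,
 * followed by a relocation against the backing buffer object.
 */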
static void r700SetupVTXConstants(GLcontext *ctx,
                                  void *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos *paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride) {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    } else {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace the original data type back through the API, not only GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize) {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed) {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

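/* Zero the base-vertex/start-instance CTL constants, then emit a vertex
 * fetch resource for every attribute the current vertex program reads.
 */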
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

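/* Fill in the CB_COLOR0_* state for render target 'id' from the current
 * color renderbuffer: base offset (in 256-byte units), pitch/slice tile
 * maxima, and a format/comp-swap pair chosen from the renderbuffer's cpp.
 */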
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset / 256;

    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp) {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    } else {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buffer */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp) {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    } else {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than what is actually needed, so use the height actually used. */
}

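/* Emit the depth buffer registers and their relocation. Chips after R600
 * but before RV770 also get a SURFACE_BASE_UPDATE packet here; bit 0
 * flags the depth surface, matching the (2 << id) used for color targets
 * below.
 */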
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();
}

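/* Emit color target state: base address plus relocation, the CMASK/TILE/
 * FRAG dummy pointers explained below, then size/view/info/mask. Only
 * target 0 is used at the moment.
 */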
static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id > R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    /* Point the CMASK & TILE buffers at the color buffer offset. Since we
     * don't use them, this shouldn't cause any issues and it keeps the
     * command stream valid.
     */
    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_TILE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();
    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_FRAG + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(SQ_LOOP_CONST_0, 0x01000FFF);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + 32*4), 0x0100000F);
    //R600_OUT_BATCH_REGVAL((SQ_LOOP_CONST_0 + (SQ_LOOP_CONST_vs << 2)), 0x0100000F);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo *pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id > R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

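/* Emit the mostly static SQ configuration: GPR, thread and stack resource
 * partitioning plus the ES/GS/VS/PS ring item sizes.
 */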
static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the constant file. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

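/* Begin an occlusion query: zero the result buffer (4 DBs, 2 qwords each),
 * make sure there is space for it in the CS, then emit a ZPASS_DONE event
 * so the hardware writes its zpass counts at query->curr_offset.
 */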
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(ZPASS_DONE);
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

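/* The check_* callbacks report the worst-case number of dwords an atom
 * will emit on this flush (0 lets the atom be skipped); the totals have to
 * cover the BEGIN_BATCH_NO_AUTOSTATE counts in the matching emit function.
 */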
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget, but the state size
           is calculated before that. Until MRTs are done, hardcode
           target 0 as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

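/* Register a state atom on the context: record its worst-case command
 * size, pick its size-check callback by token pasting (check_##CHK), hook
 * up the emit function and link the atom onto the context's atom list.
 */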
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)

static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

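/* Create every state atom for this context. The sizes passed here are the
 * worst cases returned by the check callbacks and are accumulated into
 * max_state_size for command-buffer space checking.
 */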
void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Set up the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 21, r700SendVSState);
    ALLOC_STATE(ps, always, 24, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}