r600: fix render size prediction
[mesa.git] / src / mesa / drivers / dri / r600 / r700_chip.c
/*
 * Copyright (C) 2008-2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

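/*
 * Editor's note (commentary added for this listing, not from the original
 * authors): each r700Send* callback below emits a group of registers into
 * the command stream between BEGIN_BATCH_NO_AUTOSTATE(n) and END_BATCH(),
 * where n is the number of dwords written.  The check_* callbacks near the
 * bottom of the file predict these sizes so command buffer space can be
 * reserved up front; keeping the two in sync is the "render size
 * prediction" this commit fixes.  Each R600_OUT_BATCH_RELOC appears to
 * account for 2 extra dwords in the BEGIN_BATCH count (e.g. "9 + 4" below
 * covers a 9-dword SET_RESOURCE packet plus two relocations).
 */
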
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override)
                    bo = t->mt->bo;
                else
                    bo = t->bo;
                if (bo) {

                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                    R600_OUT_BATCH(i * 7);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(r700->textures[i]->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE2,
                                         bo,
                                         0,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(r700->textures[i]->SQ_TEX_RESOURCE3,
                                         bo,
                                         r700->textures[i]->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(r700->textures[i]->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(r700->textures[i]->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

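/*
 * Editor's note (an interpretation of the code below, not from the
 * original authors): the SQ_VTX_CONSTANT_WORD0..6 values describe one
 * vertex fetch resource.  WORD0 holds the buffer base (the aos offset),
 * WORD1 the buffer size in bytes minus one, WORD2 packs the upper address
 * bits, the stride and the data format, WORD3 the memory request size,
 * and WORD6 marks the resource as a valid buffer.
 */
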
static void r700SetupVTXConstants(GLcontext * ctx,
                                  unsigned int nStreamID,
                                  void * pAos,
                                  unsigned int size,   /* number of elements in vector */
                                  unsigned int stride,
                                  unsigned int count)  /* number of vectors in stream */
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = count * (size * 4) - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(GL_FLOAT, size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back the API for the initial data type, not only GL_FLOAT */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_SCALED,
             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();

}

extern int getTypeSize(GLenum type);

static void r700SetupVTXConstants2(GLcontext * ctx,
                                   void * pAos,
                                   StreamDesc * pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride)
    {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }
    else
    {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask);

    if (GL_TRUE == pStreamDesc->normalize)
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed)
    {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();

}

void r700SetupStreams(GLcontext *ctx)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    unsigned int i, j = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    R600_STATECHANGE(context, vtx);

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i)) {
            rcommon_emit_vector(ctx,
                                &context->radeon.tcl.aos[j],
                                vb->AttribPtr[i]->data,
                                vb->AttribPtr[i]->size,
                                vb->AttribPtr[i]->stride,
                                vb->Count);
            j++;
        }
    }
    context->radeon.tcl.aos_count = j;
}

static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i))
        {
            if (1 == context->selected_vp->uiVersion)
            {
                /* currently aos are packed */
                r700SetupVTXConstants(ctx,
                                      i,
                                      (void*)(&context->radeon.tcl.aos[j]),
                                      (unsigned int)context->radeon.tcl.aos[j].components,
                                      (unsigned int)context->radeon.tcl.aos[j].stride * 4,
                                      (unsigned int)context->radeon.tcl.aos[j].count);
            }
            else
            {   /* context->selected_vp->uiVersion == 2 : aos not always packed */
                r700SetupVTXConstants2(ctx,
                                       (void*)(&context->radeon.tcl.aos[j]),
                                       &(context->stream_desc[j]));
            }
            j++;
        }
    }
}

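/*
 * Editor's note (an interpretation of the code below, not from the
 * original authors): the color and depth buffer "tile max" fields store
 * maxima rather than counts: PITCH_TILE_MAX is the pitch in 8-pixel
 * groups minus one, and SLICE_TILE_MAX is the surface size in 64-pixel
 * tiles minus one.
 */
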
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    r700->render_target[id].CB_COLOR0_BASE.u32All = context->radeon.state.color.draw_offset;

    nPitchInPixel = rrb->pitch / rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch / rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel / 8) - 1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All,
             ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight) / 64) - 1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp)
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    else
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* The z buffer may be much bigger than what is needed, so use the actual height drawn. */
}

static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0);
        END_BATCH();
    }

    COMMIT_BATCH();

}

static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        fprintf(stderr, "no rrb\n");
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id > R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id));
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();

}

static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();

}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it's not used.
     * Since we aren't using the FS yet, just send the VS address to make
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();

}

static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id > R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();

}

static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(23);
    R600_OUT_BATCH_REGVAL(DB_HTILE_DATA_BASE, r700->DB_HTILE_DATA_BASE.u32All);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_HTILE_SURFACE, r700->DB_HTILE_SURFACE.u32All);
    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();

}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();

}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

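/*
 * Editor's note (an interpretation of the code below, not from the
 * original authors): SET_ALU_CONST uploads num_consts four-dword vectors
 * into the ALU constant file, starting at the PS or VS half of the range
 * (SQ_ALU_CONSTANT_*_OFFSET, in dwords).  This is why the packet is
 * "2 + num_consts * 4" dwords here and in the size prediction below.
 */
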
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the range. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* The assembler maps constants from the very beginning of the range. */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

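/*
 * Editor's note (commentary added for this listing, not from the original
 * authors): the check_* callbacks below implement the render size
 * prediction this commit fixes.  Each returns the number of dwords the
 * matching emit function may write for the current state (0 lets the atom
 * be skipped entirely), and the results are used to reserve command
 * buffer space before emission.  The counts must be at least as large as
 * the BEGIN_BATCH_NO_AUTOSTATE() totals above, including 2 dwords per
 * relocation.
 */
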
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

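/*
 * ALLOC_STATE registers one state atom: SZ is the worst-case dword count
 * (it also feeds max_state_size), check_##CHK supplies the per-flush size
 * prediction, and EMIT is the callback that writes the registers.
 */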
#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                                  \
do {                                                                        \
    context->atoms.ATOM.cmd_size = (SZ);                                    \
    context->atoms.ATOM.cmd = NULL;                                         \
    context->atoms.ATOM.name = #ATOM;                                       \
    context->atoms.ATOM.idx = 0;                                            \
    context->atoms.ATOM.check = check_##CHK;                                \
    context->atoms.ATOM.dirty = GL_FALSE;                                   \
    context->atoms.ATOM.emit = (EMIT);                                      \
    context->radeon.hw.max_state_size += (SZ);                              \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM);     \
} while (0)
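
/*
 * For illustration (expansion added for this listing): the first
 * invocation below, ALLOC_STATE(sq, always, 34, r700SendSQConfig),
 * expands to:
 *
 *     context->atoms.sq.cmd_size = (34);
 *     context->atoms.sq.cmd = NULL;
 *     context->atoms.sq.name = "sq";
 *     context->atoms.sq.idx = 0;
 *     context->atoms.sq.check = check_always;
 *     context->atoms.sq.dirty = GL_FALSE;
 *     context->atoms.sq.emit = r700SendSQConfig;
 *     context->radeon.hw.max_state_size += (34);
 *     insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.sq);
 */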

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Setup the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 23, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}