r600: adjust after radeon mipmap changes in 7118db8700
src/mesa/drivers/dri/r600/r700_chip.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/imports.h"
#include "main/glheader.h"
#include "main/simple_list.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_state.h"
#include "r600_tex.h"
#include "r700_oglprog.h"
#include "r700_fragprog.h"
#include "r700_vertprog.h"
#include "r700_ioctl.h"

#include "radeon_mipmap_tree.h"

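/*
 * Every emit function below follows the same pattern: reserve space in the
 * command stream with BEGIN_BATCH_NO_AUTOSTATE (the argument is the number
 * of dwords that will be written), write PM4 packets and register values
 * with the R600_OUT_BATCH* macros, then close with END_BATCH/COMMIT_BATCH.
 * R600_OUT_BATCH_RELOC does not emit a literal GPU address; it records a
 * relocation entry so the kernel CS checker can patch in the buffer
 * object's real address at submission time.
 */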
static void r700SendTexState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    struct radeon_bo *bo = NULL;
    unsigned int i;
    BATCH_LOCALS(&context->radeon);

    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                if (!t->image_override) {
                    bo = t->mt->bo;
                } else {
                    bo = t->bo;
                }
                if (bo) {
                    r700SyncSurf(context, bo,
                                 RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM,
                                 0, TC_ACTION_ENA_bit);

                    BEGIN_BATCH_NO_AUTOSTATE(9 + 4);
                    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
                    R600_OUT_BATCH(i * 7);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE0);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE1);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE2);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE3);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE4);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE5);
                    R600_OUT_BATCH(t->SQ_TEX_RESOURCE6);
                    R600_OUT_BATCH_RELOC(t->SQ_TEX_RESOURCE2,
                                         bo,
                                         t->SQ_TEX_RESOURCE2,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    R600_OUT_BATCH_RELOC(t->SQ_TEX_RESOURCE3,
                                         bo,
                                         t->SQ_TEX_RESOURCE3,
                                         RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
                    END_BATCH();
                    COMMIT_BATCH();
                }
            }
        }
    }
}

static void r700SendTexSamplerState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(5);
                R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, 3));
                R600_OUT_BATCH(i * 3);
                R600_OUT_BATCH(t->SQ_TEX_SAMPLER0);
                R600_OUT_BATCH(t->SQ_TEX_SAMPLER1);
                R600_OUT_BATCH(t->SQ_TEX_SAMPLER2);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

static void r700SendTexBorderColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t) {
                BEGIN_BATCH_NO_AUTOSTATE(2 + 4);
                R600_OUT_BATCH_REGSEQ((TD_PS_SAMPLER0_BORDER_RED + (i * 16)), 4);
                R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_RED);
                R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_GREEN);
                R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_BLUE);
                R600_OUT_BATCH(t->TD_PS_SAMPLER0_BORDER_ALPHA);
                END_BATCH();
                COMMIT_BATCH();
            }
        }
    }
}

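/*
 * Each enabled vertex stream is described to the vertex fetcher by an
 * SQ_VTX_CONSTANT resource: WORD0 is the stream's offset within the buffer
 * object, WORD1 is the addressable size in bytes minus one, and WORD2/WORD3
 * carry the stride, data format and fetch flags.  For example, 100 vertices
 * of 4 tightly packed floats (stride 0) give nVBsize = 100 * 4 *
 * getTypeSize(GL_FLOAT) = 1600 bytes, so WORD1 is 1599.
 */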
extern int getTypeSize(GLenum type);
static void r700SetupVTXConstants(GLcontext  *ctx,
                                  void       *pAos,
                                  StreamDesc *pStreamDesc)
{
    context_t *context = R700_CONTEXT(ctx);
    struct radeon_aos * paos = (struct radeon_aos *)pAos;
    unsigned int nVBsize;
    BATCH_LOCALS(&context->radeon);

    unsigned int uSQ_VTX_CONSTANT_WORD0_0;
    unsigned int uSQ_VTX_CONSTANT_WORD1_0;
    unsigned int uSQ_VTX_CONSTANT_WORD2_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD3_0 = 0;
    unsigned int uSQ_VTX_CONSTANT_WORD6_0 = 0;

    if (!paos->bo)
        return;

    if ((context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV610) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV620) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS780) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RS880) ||
        (context->radeon.radeonScreen->chip_family == CHIP_FAMILY_RV710))
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, TC_ACTION_ENA_bit);
    else
        r700SyncSurf(context, paos->bo, RADEON_GEM_DOMAIN_GTT, 0, VC_ACTION_ENA_bit);

    if (0 == pStreamDesc->stride)
    {
        nVBsize = paos->count * pStreamDesc->size * getTypeSize(pStreamDesc->type);
    }
    else
    {
        nVBsize = paos->count * pStreamDesc->stride;
    }

    uSQ_VTX_CONSTANT_WORD0_0 = paos->offset;
    uSQ_VTX_CONSTANT_WORD1_0 = nVBsize - 1;

    SETfield(uSQ_VTX_CONSTANT_WORD2_0, 0, BASE_ADDRESS_HI_shift, BASE_ADDRESS_HI_mask); /* TODO */
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, pStreamDesc->stride, SQ_VTX_CONSTANT_WORD2_0__STRIDE_shift,
             SQ_VTX_CONSTANT_WORD2_0__STRIDE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD2_0, GetSurfaceFormat(pStreamDesc->type, pStreamDesc->size, NULL),
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_shift,
             SQ_VTX_CONSTANT_WORD2_0__DATA_FORMAT_mask); /* TODO: trace back the API's original data type, not only GL_FLOAT */

    if (GL_TRUE == pStreamDesc->normalize)
    {
        SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_NORM,
                 SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    }
    //else
    //{
    //    SETfield(uSQ_VTX_CONSTANT_WORD2_0, SQ_NUM_FORMAT_INT,
    //             SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_shift, SQ_VTX_CONSTANT_WORD2_0__NUM_FORMAT_ALL_mask);
    //}

    if (1 == pStreamDesc->_signed)
    {
        SETbit(uSQ_VTX_CONSTANT_WORD2_0, SQ_VTX_CONSTANT_WORD2_0__FORMAT_COMP_ALL_bit);
    }

    SETfield(uSQ_VTX_CONSTANT_WORD3_0, 1, MEM_REQUEST_SIZE_shift, MEM_REQUEST_SIZE_mask);
    SETfield(uSQ_VTX_CONSTANT_WORD6_0, SQ_TEX_VTX_VALID_BUFFER,
             SQ_TEX_RESOURCE_WORD6_0__TYPE_shift, SQ_TEX_RESOURCE_WORD6_0__TYPE_mask);

    BEGIN_BATCH_NO_AUTOSTATE(9 + 2);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
    R600_OUT_BATCH((pStreamDesc->element + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD0_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
    R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
                         paos->bo,
                         uSQ_VTX_CONSTANT_WORD0_0,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

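/*
 * SQ_VTX_BASE_VTX_LOC and SQ_VTX_START_INST_LOC live in CTL-constant space,
 * so they are written with SET_CTL_CONST (offset relative to
 * ASIC_CTL_CONST_BASE_INDEX) rather than a context-register packet; both
 * are simply zeroed here.  The j index below maps the n-th enabled vertex
 * attribute to the n-th AOS/stream-descriptor pair.
 */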
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for (i = 0; i < VERT_ATTRIB_MAX; i++) {
        if (vp->mesa_program->Base.InputsRead & (1 << i))
        {
            r700SetupVTXConstants(ctx,
                                  (void*)(&context->radeon.tcl.aos[j]),
                                  &(context->stream_desc[j]));
            j++;
        }
    }
}

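/*
 * CB_COLOR0_SIZE encodes the surface dimensions in tile units:
 * PITCH_TILE_MAX is the pitch in pixels divided by 8, minus one, and
 * SLICE_TILE_MAX is the total pixel count divided by 64, minus one.
 * E.g. a 1024-pixel-wide, 768-line framebuffer gives PITCH_TILE_MAX =
 * 1024/8 - 1 = 127 and SLICE_TILE_MAX = (1024*768)/64 - 1 = 12287.
 */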
static void r700SetRenderTarget(context_t *context, int id)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    R600_STATECHANGE(context, cb_target);

    /* color buffer */
    nPitchInPixel = rrb->pitch/rrb->cpp;
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->render_target[id].CB_COLOR0_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
    /* the GPU base address is patched in through the relocation at emit
       time, so CB_COLOR0_BASE itself stays zero */
    r700->render_target[id].CB_COLOR0_BASE.u32All = 0;
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, ARRAY_LINEAR_GENERAL,
             CB_COLOR0_INFO__ARRAY_MODE_shift, CB_COLOR0_INFO__ARRAY_MODE_mask);
    if (4 == rrb->cpp)
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_8_8_8_8,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT, COMP_SWAP_shift, COMP_SWAP_mask);
    }
    else
    {
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, COLOR_5_6_5,
                 CB_COLOR0_INFO__FORMAT_shift, CB_COLOR0_INFO__FORMAT_mask);
        SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, SWAP_ALT_REV,
                 COMP_SWAP_shift, COMP_SWAP_mask);
    }
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, SOURCE_FORMAT_bit);
    SETbit(r700->render_target[id].CB_COLOR0_INFO.u32All, BLEND_CLAMP_bit);
    SETfield(r700->render_target[id].CB_COLOR0_INFO.u32All, NUMBER_UNORM, NUMBER_TYPE_shift, NUMBER_TYPE_mask);

    r700->render_target[id].enabled = GL_TRUE;
}

static void r700SetDepthTarget(context_t *context)
{
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    struct radeon_renderbuffer *rrb;
    unsigned int nPitchInPixel;

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb)
        return;

    R600_STATECHANGE(context, db_target);

    /* depth buf */
    r700->DB_DEPTH_SIZE.u32All = 0;
    r700->DB_DEPTH_BASE.u32All = 0;
    r700->DB_DEPTH_INFO.u32All = 0;
    r700->DB_DEPTH_VIEW.u32All = 0;

    nPitchInPixel = rrb->pitch/rrb->cpp;

    SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel/8)-1,
             PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
    SETfield(r700->DB_DEPTH_SIZE.u32All, ((nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64)-1,
             SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixels / 64 - 1 */

    if (4 == rrb->cpp)
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_8_24,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    else
    {
        SETfield(r700->DB_DEPTH_INFO.u32All, DEPTH_16,
                 DB_DEPTH_INFO__FORMAT_shift, DB_DEPTH_INFO__FORMAT_mask);
    }
    SETfield(r700->DB_DEPTH_INFO.u32All, ARRAY_1D_TILED_THIN1,
             DB_DEPTH_INFO__ARRAY_MODE_shift, DB_DEPTH_INFO__ARRAY_MODE_mask);
    /* r700->DB_PREFETCH_LIMIT.bits.DEPTH_HEIGHT_TILE_MAX = (context->currentDraw->h >> 3) - 1; */
    /* the z buffer may be much bigger than actually needed, so use the actual used height */
}

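/*
 * RV610-RV670 class parts (family strictly between R600 and RV770) need a
 * SURFACE_BASE_UPDATE packet to pick up a newly written color or depth base
 * address, so the two emit functions below follow the relocated base
 * register with that event when the family check matches.
 */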
static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetDepthTarget(context);

    BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_SIZE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_SIZE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_VIEW.u32All);
    R600_OUT_BATCH_REGSEQ(DB_DEPTH_BASE, 2);
    R600_OUT_BATCH(r700->DB_DEPTH_BASE.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_INFO.u32All);
    R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
                         rrb->bo,
                         r700->DB_DEPTH_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH(1 << 0); /* depth surface */
        END_BATCH();
    }

    COMMIT_BATCH();
}

static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_renderbuffer *rrb;
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (!rrb || !rrb->bo) {
        return;
    }

    r700SetRenderTarget(context, 0);

    if (id >= R700_MAX_RENDER_TARGETS)
        return;

    if (!r700->render_target[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
    R600_OUT_BATCH(r700->render_target[id].CB_COLOR0_BASE.u32All);
    R600_OUT_BATCH_RELOC(r700->render_target[id].CB_COLOR0_BASE.u32All,
                         rrb->bo,
                         r700->render_target[id].CB_COLOR0_BASE.u32All,
                         0, RADEON_GEM_DOMAIN_VRAM, 0);
    END_BATCH();

    if ((context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) &&
        (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)) {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
        R600_OUT_BATCH((2 << id)); /* color surface id */
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(18);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_SIZE + (4 * id), r700->render_target[id].CB_COLOR0_SIZE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_VIEW + (4 * id), r700->render_target[id].CB_COLOR0_VIEW.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_INFO + (4 * id), r700->render_target[id].CB_COLOR0_INFO.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_TILE + (4 * id), r700->render_target[id].CB_COLOR0_TILE.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_FRAG + (4 * id), r700->render_target[id].CB_COLOR0_FRAG.u32All);
    R600_OUT_BATCH_REGVAL(CB_COLOR0_MASK + (4 * id), r700->render_target[id].CB_COLOR0_MASK.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

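/*
 * The three shader-stage emitters below share one shape: fetch the active
 * shader's buffer object, flush the shader cache for it (SH_ACTION_ENA),
 * emit SQ_PGM_START_* with a relocation so the kernel fills in the real
 * instruction base address, then write the stage's resource registers.
 */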
static void r700SendPSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_PS, 1);
    R600_OUT_BATCH(r700->ps.SQ_PGM_START_PS.u32All);
    R600_OUT_BATCH_RELOC(r700->ps.SQ_PGM_START_PS.u32All,
                         pbo,
                         r700->ps.SQ_PGM_START_PS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_PS, r700->ps.SQ_PGM_RESOURCES_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_EXPORTS_PS, r700->ps.SQ_PGM_EXPORTS_PS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_PS, r700->ps.SQ_PGM_CF_OFFSET_PS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendVSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_VS, 1);
    R600_OUT_BATCH(r700->vs.SQ_PGM_START_VS.u32All);
    R600_OUT_BATCH_RELOC(r700->vs.SQ_PGM_START_VS.u32All,
                         pbo,
                         r700->vs.SQ_PGM_START_VS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_VS, r700->vs.SQ_PGM_RESOURCES_VS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_VS, r700->vs.SQ_PGM_CF_OFFSET_VS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendFSState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    struct radeon_bo * pbo;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* XXX fixme
     * R6xx chips require an FS to be emitted, even if it isn't used.
     * Since we aren't using the FS yet, just send the VS address to keep
     * the kernel command checker happy.
     */
    pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
    r700->fs.SQ_PGM_START_FS.u32All = r700->vs.SQ_PGM_START_VS.u32All;
    r700->fs.SQ_PGM_RESOURCES_FS.u32All = 0;
    r700->fs.SQ_PGM_CF_OFFSET_FS.u32All = 0;
    /* XXX */

    if (!pbo)
        return;

    r700SyncSurf(context, pbo, RADEON_GEM_DOMAIN_GTT, 0, SH_ACTION_ENA_bit);

    BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
    R600_OUT_BATCH_REGSEQ(SQ_PGM_START_FS, 1);
    R600_OUT_BATCH(r700->fs.SQ_PGM_START_FS.u32All);
    R600_OUT_BATCH_RELOC(r700->fs.SQ_PGM_START_FS.u32All,
                         pbo,
                         r700->fs.SQ_PGM_START_FS.u32All,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGVAL(SQ_PGM_RESOURCES_FS, r700->fs.SQ_PGM_RESOURCES_FS.u32All);
    R600_OUT_BATCH_REGVAL(SQ_PGM_CF_OFFSET_FS, r700->fs.SQ_PGM_CF_OFFSET_FS.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

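/*
 * Viewport registers are laid out in per-viewport blocks: the scissor
 * TL/BR pair and the ZMIN/ZMAX pair each advance by 8 bytes per viewport,
 * and the six PA_CL_VPORT_* scale/offset registers advance by 24 bytes per
 * viewport, hence the (8 * id) and (24 * id) offsets below.
 */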
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int id = 0;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (id >= R700_MAX_VIEWPORTS)
        return;

    if (!r700->viewport[id].enabled)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(16);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
    R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
    R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

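/*
 * SQ_CONFIG and the GPR/thread/stack resource-management registers divide
 * the shader core's register file, thread slots and stacks between the
 * shader stages; the values themselves are computed when the context state
 * is initialized and are only replayed into the command stream here.
 */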
static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(34);
    R600_OUT_BATCH_REGSEQ(SQ_CONFIG, 6);
    R600_OUT_BATCH(r700->sq_config.SQ_CONFIG.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_GPR_RESOURCE_MGMT_2.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_THREAD_RESOURCE_MGMT.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_1.u32All);
    R600_OUT_BATCH(r700->sq_config.SQ_STACK_RESOURCE_MGMT_2.u32All);

    R600_OUT_BATCH_REGVAL(TA_CNTL_AUX, r700->TA_CNTL_AUX.u32All);
    R600_OUT_BATCH_REGVAL(VC_ENHANCE, r700->VC_ENHANCE.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, r700->SQ_DYN_GPR_CNTL_PS_FLUSH_REQ.u32All);
    R600_OUT_BATCH_REGVAL(DB_DEBUG, r700->DB_DEBUG.u32All);
    R600_OUT_BATCH_REGVAL(DB_WATERMARKS, r700->DB_WATERMARKS.u32All);

    R600_OUT_BATCH_REGSEQ(SQ_ESGS_RING_ITEMSIZE, 9);
    R600_OUT_BATCH(r700->SQ_ESGS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSVS_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_ESTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_VSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_PSTMP_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_FBUF_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_REDUC_RING_ITEMSIZE.u32All);
    R600_OUT_BATCH(r700->SQ_GS_VERT_ITEMSIZE.u32All);
    END_BATCH();

    COMMIT_BATCH();
}

static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    int i;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled) {
            BEGIN_BATCH_NO_AUTOSTATE(6);
            R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Y.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_Z.u32All);
            R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
            END_BATCH();
            COMMIT_BATCH();
        }
    }
}

static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(59 + R700_MAX_SHADER_EXPORTS);

    R600_OUT_BATCH_REGSEQ(SQ_VTX_SEMANTIC_0, 32);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_0.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_1.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_2.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_3.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_4.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_5.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_6.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_7.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_8.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_9.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_10.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_11.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_12.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_13.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_14.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_15.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_16.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_17.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_18.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_19.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_20.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_21.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_22.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_23.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_24.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_25.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_26.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_27.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_28.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_29.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_30.u32All);
    R600_OUT_BATCH(r700->SQ_VTX_SEMANTIC_31.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_ID_0, 10);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_0.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_1.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_2.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_3.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_4.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_5.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_6.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_7.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_8.u32All);
    R600_OUT_BATCH(r700->SPI_VS_OUT_ID_9.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_VS_OUT_CONFIG, 9);
    R600_OUT_BATCH(r700->SPI_VS_OUT_CONFIG.u32All);
    R600_OUT_BATCH(r700->SPI_THREAD_GROUPING.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_PS_IN_CONTROL_1.u32All);
    R600_OUT_BATCH(r700->SPI_INTERP_CONTROL_0.u32All);
    R600_OUT_BATCH(r700->SPI_INPUT_Z.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_CNTL.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_SCALE.u32All);
    R600_OUT_BATCH(r700->SPI_FOG_FUNC_BIAS.u32All);

    R600_OUT_BATCH_REGSEQ(SPI_PS_INPUT_CNTL_0, R700_MAX_SHADER_EXPORTS);
    for (ui = 0; ui < R700_MAX_SHADER_EXPORTS; ui++)
        R600_OUT_BATCH(r700->SPI_PS_INPUT_CNTL[ui].u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(41);

    R600_OUT_BATCH_REGSEQ(VGT_MAX_VTX_INDX, 4);
    R600_OUT_BATCH(r700->VGT_MAX_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_MIN_VTX_INDX.u32All);
    R600_OUT_BATCH(r700->VGT_INDX_OFFSET.u32All);
    R600_OUT_BATCH(r700->VGT_MULTI_PRIM_IB_RESET_INDX.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_OUTPUT_PATH_CNTL, 13);
    R600_OUT_BATCH(r700->VGT_OUTPUT_PATH_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MAX_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_MIN_TESS_LEVEL.u32All);
    R600_OUT_BATCH(r700->VGT_HOS_REUSE_DEPTH.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_PRIM_TYPE.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_FIRST_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_DECR.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_0_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GROUP_VECT_1_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->VGT_GS_MODE.u32All);

    R600_OUT_BATCH_REGVAL(VGT_PRIMITIVEID_EN, r700->VGT_PRIMITIVEID_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_MULTI_PRIM_IB_RESET_EN, r700->VGT_MULTI_PRIM_IB_RESET_EN.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_0, r700->VGT_INSTANCE_STEP_RATE_0.u32All);
    R600_OUT_BATCH_REGVAL(VGT_INSTANCE_STEP_RATE_1, r700->VGT_INSTANCE_STEP_RATE_1.u32All);

    R600_OUT_BATCH_REGSEQ(VGT_STRMOUT_EN, 3);
    R600_OUT_BATCH(r700->VGT_STRMOUT_EN.u32All);
    R600_OUT_BATCH(r700->VGT_REUSE_OFF.u32All);
    R600_OUT_BATCH(r700->VGT_VTX_CNT_EN.u32All);

    R600_OUT_BATCH_REGVAL(VGT_STRMOUT_BUFFER_EN, r700->VGT_STRMOUT_BUFFER_EN.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(SX_MISC, r700->SX_MISC.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_TEST_CONTROL, r700->SX_ALPHA_TEST_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(17);

    R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
    R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
    R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

    R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
    R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

    R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
    R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
    R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

    R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendStencilState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(4);
    R600_OUT_BATCH_REGSEQ(DB_STENCILREFMASK, 2);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK.u32All);
    R600_OUT_BATCH(r700->DB_STENCILREFMASK_BF.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(11);
        R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
        R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
        R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
        R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
        R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(7);
    R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
    R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
    R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
    R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBCLRCMPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_CLRCMP_CONTROL, 4);
    R600_OUT_BATCH(r700->CB_CLRCMP_CONTROL.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_SRC.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_DST.u32All);
    R600_OUT_BATCH(r700->CB_CLRCMP_MSK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    unsigned int ui;
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
        BEGIN_BATCH_NO_AUTOSTATE(3);
        R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
    END_BATCH();

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled) {
                BEGIN_BATCH_NO_AUTOSTATE(3);
                R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
                                      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
                END_BATCH();
            }
        }
    }

    COMMIT_BATCH();
}

static void r700SendCBBlendColorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(CB_BLEND_RED, 4);
    R600_OUT_BATCH(r700->CB_BLEND_RED.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_GREEN.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_BLUE.u32All);
    R600_OUT_BATCH(r700->CB_BLEND_ALPHA.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(9);
    R600_OUT_BATCH_REGVAL(PA_SU_SC_MODE_CNTL, r700->PA_SU_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POINT_SIZE, 4);
    R600_OUT_BATCH(r700->PA_SU_POINT_SIZE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POINT_MINMAX.u32All);
    R600_OUT_BATCH(r700->PA_SU_LINE_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_VTX_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendPolyState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(10);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_DB_FMT_CNTL, 2);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_DB_FMT_CNTL.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_CLAMP.u32All);
    R600_OUT_BATCH_REGSEQ(PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_SCALE.u32All);
    R600_OUT_BATCH(r700->PA_SU_POLY_OFFSET_BACK_OFFSET.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_CL_CLIP_CNTL, r700->PA_CL_CLIP_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VTE_CNTL, r700->PA_CL_VTE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_VS_OUT_CNTL, r700->PA_CL_VS_OUT_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_CL_NANINF_CNTL, r700->PA_CL_NANINF_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendGBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH_REGSEQ(PA_CL_GB_VERT_CLIP_ADJ, 4);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_VERT_DISC_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_CLIP_ADJ.u32All);
    R600_OUT_BATCH(r700->PA_CL_GB_HORZ_DISC_ADJ.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(22);
    R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

    R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
    R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(15);
    R600_OUT_BATCH_REGVAL(R7xx_PA_SC_EDGERULE, r700->PA_SC_EDGERULE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_STIPPLE, r700->PA_SC_LINE_STIPPLE.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MPASS_PS_CNTL, r700->PA_SC_MPASS_PS_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_MODE_CNTL, r700->PA_SC_MODE_CNTL.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_LINE_CNTL, r700->PA_SC_LINE_CNTL.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendAAState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    BATCH_LOCALS(&context->radeon);

    BEGIN_BATCH_NO_AUTOSTATE(12);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_CONFIG, r700->PA_SC_AA_CONFIG.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX, r700->PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX.u32All);
    R600_OUT_BATCH_REGVAL(PA_SC_AA_MASK, r700->PA_SC_AA_MASK.u32All);
    END_BATCH();
    COMMIT_BATCH();
}

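/*
 * ALU constants are uploaded as vec4s: SET_ALU_CONST carries num_consts * 4
 * dwords of payload, and the start offset is a dword address, so the vec4
 * slot offset (SQ_ALU_CONSTANT_PS_OFFSET / SQ_ALU_CONSTANT_VS_OFFSET) is
 * multiplied by 4.  The PS and VS banks use disjoint offsets.
 */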
static void r700SendPSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);

    if (r700->ps.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->ps.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->ps.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
    for (i = 0; i < r700->ps.num_consts; i++) {
        R600_OUT_BATCH(r700->ps.consts[i][0].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][1].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][2].u32All);
        R600_OUT_BATCH(r700->ps.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

static void r700SendVSConsts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
    int i;
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (r700->vs.num_consts == 0)
        return;

    BEGIN_BATCH_NO_AUTOSTATE(2 + (r700->vs.num_consts * 4));
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (r700->vs.num_consts * 4)));
    /* the assembler maps constants from the very beginning */
    R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
    for (i = 0; i < r700->vs.num_consts; i++) {
        R600_OUT_BATCH(r700->vs.consts[i][0].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][1].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][2].u32All);
        R600_OUT_BATCH(r700->vs.consts[i][3].u32All);
    }
    END_BATCH();
    COMMIT_BATCH();
}

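/*
 * Occlusion query begin: the ZPASS_DONE event makes every DB write its
 * zpass counter into the query buffer object (one begin/end qword pair per
 * DB, matching the 4 * 2 * sizeof(uint64_t) clear below); the relocation
 * supplies the buffer's GPU address.
 */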
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    BATCH_LOCALS(radeon);
    radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    /* clear the buffer */
    radeon_bo_map(query->bo, GL_FALSE);
    memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
    radeon_bo_unmap(query->bo);

    radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
                                  query->bo,
                                  0, RADEON_GEM_DOMAIN_GTT);

    BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
    R600_OUT_BATCH(ZPASS_DONE);
    R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
    R600_OUT_BATCH(0x00000000);
    R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
    END_BATCH();
    query->emitted_begin = GL_TRUE;
}

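/*
 * The check_* callbacks report how many dwords an atom's emit function will
 * write for the current state (0 means the atom can be skipped); the
 * command-buffer code uses these both to size the CS and to decide which
 * atoms to emit.
 */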
static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
{
    return atom->cmd_size;
}

static int check_cb(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = 7;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 11;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_blnd(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    unsigned int ui;
    int count = 3;

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
        count += 3;

    if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
        /* Targets are enabled in r700SetRenderTarget, but the state size
           is calculated before that.  Until MRTs are done, hardcode
           target 0 as enabled. */
        count += 3;
        for (ui = 1; ui < R700_MAX_RENDER_TARGETS; ui++) {
            if (r700->render_target[ui].enabled)
                count += 3;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_ucp(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int i;
    int count = 0;

    for (i = 0; i < R700_MAX_UCP; i++) {
        if (r700->ucp[i].enabled)
            count += 6;
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_vtx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    int count = context->radeon.tcl.aos_count * 18;

    if (count)
        count += 6;

    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

static int check_tx(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    unsigned int i, count = 0;
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);

    for (i = 0; i < R700_TEXTURE_NUMBERUNITS; i++) {
        if (ctx->Texture.Unit[i]._ReallyEnabled) {
            radeonTexObj *t = r700->textures[i];
            if (t)
                count++;
        }
    }
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count * 31;
}

static int check_ps_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->ps.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_vs_consts(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t *context = R700_CONTEXT(ctx);
    R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
    int count = r700->vs.num_consts * 4;

    if (count)
        count += 2;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);

    return count;
}

static int check_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_query_object *query = radeon->query.current;
    int count;

    if (!query || query->emitted_begin)
        count = 0;
    else
        count = atom->cmd_size;
    radeon_print(RADEON_STATE, RADEON_TRACE, "%s %d\n", __func__, count);
    return count;
}

#define ALLOC_STATE( ATOM, CHK, SZ, EMIT )                              \
do {                                                                    \
    context->atoms.ATOM.cmd_size = (SZ);                                \
    context->atoms.ATOM.cmd = NULL;                                     \
    context->atoms.ATOM.name = #ATOM;                                   \
    context->atoms.ATOM.idx = 0;                                        \
    context->atoms.ATOM.check = check_##CHK;                            \
    context->atoms.ATOM.dirty = GL_FALSE;                               \
    context->atoms.ATOM.emit = (EMIT);                                  \
    context->radeon.hw.max_state_size += (SZ);                          \
    insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
} while (0)
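/*
 * For example, ALLOC_STATE(sq, always, 34, r700SendSQConfig) expands to
 * (roughly):
 *
 *   context->atoms.sq.cmd_size = 34;
 *   context->atoms.sq.cmd = NULL;
 *   context->atoms.sq.name = "sq";
 *   context->atoms.sq.idx = 0;
 *   context->atoms.sq.check = check_always;
 *   context->atoms.sq.dirty = GL_FALSE;
 *   context->atoms.sq.emit = r700SendSQConfig;
 *   context->radeon.hw.max_state_size += 34;
 *   insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.sq);
 */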
static void r600_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
    radeon->query.queryobj.cmd_size = (SZ);
    radeon->query.queryobj.cmd = NULL;
    radeon->query.queryobj.name = "queryobj";
    radeon->query.queryobj.idx = 0;
    radeon->query.queryobj.check = check_queryobj;
    radeon->query.queryobj.dirty = GL_FALSE;
    radeon->query.queryobj.emit = r700SendQueryBegin;
    radeon->hw.max_state_size += (SZ);
    insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}

void r600InitAtoms(context_t *context)
{
    radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
    context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

    /* Set up the atom linked list */
    make_empty_list(&context->radeon.hw.atomlist);
    context->radeon.hw.atomlist.name = "atom-list";

    ALLOC_STATE(sq, always, 34, r700SendSQConfig);
    ALLOC_STATE(db, always, 17, r700SendDBState);
    ALLOC_STATE(stencil, always, 4, r700SendStencilState);
    ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
    ALLOC_STATE(sc, always, 15, r700SendSCState);
    ALLOC_STATE(scissor, always, 22, r700SendScissorState);
    ALLOC_STATE(aa, always, 12, r700SendAAState);
    ALLOC_STATE(cl, always, 12, r700SendCLState);
    ALLOC_STATE(gb, always, 6, r700SendGBState);
    ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
    ALLOC_STATE(su, always, 9, r700SendSUState);
    ALLOC_STATE(poly, always, 10, r700SendPolyState);
    ALLOC_STATE(cb, cb, 18, r700SendCBState);
    ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
    ALLOC_STATE(cb_target, always, 25, r700SendRenderTargetState);
    ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
    ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
    ALLOC_STATE(sx, always, 9, r700SendSXState);
    ALLOC_STATE(vgt, always, 41, r700SendVGTState);
    ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
    ALLOC_STATE(vpt, always, 16, r700SendViewportState);
    ALLOC_STATE(fs, always, 18, r700SendFSState);
    ALLOC_STATE(vs, always, 18, r700SendVSState);
    ALLOC_STATE(ps, always, 21, r700SendPSState);
    ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
    ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
    ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
    ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
    ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
    ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);
    r600_init_query_stateobj(&context->radeon, 6 * 2);

    context->radeon.hw.is_dirty = GL_TRUE;
    context->radeon.hw.all_dirty = GL_TRUE;
}