6307b3a3de68de9bfe6a21a2f4f39e9d1915db9c
[mesa.git] / src / gallium / drivers / nvc0 / nvc0_state_validate.c
1
2 #include "util/u_math.h"
3
4 #include "nvc0_context.h"
5
/* Experimental ZCULL (hierarchical depth culling) setup for the current
 * depth/stencil surface.  The raw numeric method addresses are
 * reverse-engineered; trailing comments note the known valid bits.
 *
 * NOTE(review): the two ZCULL storage areas are placed in the same BO
 * directly behind the miptree's image data (each 128 KiB aligned) --
 * assumes that space was reserved at allocation time; confirm against
 * the miptree allocation code.
 */
static void
nvc0_validate_zcull(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   struct nv50_surface *sf = nv50_surface(fb->zsbuf);
   struct nv50_miptree *mt = nv50_miptree(sf->base.texture);
   struct nouveau_bo *bo = mt->base.bo;
   uint32_t size;
   uint32_t offset = align(mt->total_size, 1 << 17); /* 128 KiB aligned */
   unsigned width, height;

   /* only single-layer 2D depth surfaces are handled here */
   assert(mt->base.base.depth0 == 1 && mt->base.base.array_size < 2);

   size = mt->total_size * 2;

   /* Round the region up: height to a multiple of 32, width to a
    * multiple of 224 (presumably hardware granularity -- TODO confirm).
    */
   height = align(fb->height, 32);
   width = fb->width % 224;
   if (width)
      width = fb->width + (224 - width);
   else
      width = fb->width;

   MARK_RING (chan, 23, 4);
   BEGIN_RING(chan, RING_3D_(0x1590), 1); /* ZCULL_REGION_INDEX (bits 0x3f) */
   OUT_RING  (chan, 0);
   /* address A: first ZCULL area, directly after the image data */
   BEGIN_RING(chan, RING_3D_(0x07e8), 2); /* ZCULL_ADDRESS_A_HIGH */
   OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   offset += 1 << 17;
   /* address B: second area, another 128 KiB further */
   BEGIN_RING(chan, RING_3D_(0x07f0), 2); /* ZCULL_ADDRESS_B_HIGH */
   OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   BEGIN_RING(chan, RING_3D_(0x07e0), 2);
   OUT_RING  (chan, size);
   OUT_RING  (chan, size >> 16);
   BEGIN_RING(chan, RING_3D_(0x15c8), 1); /* bits 0x3 */
   OUT_RING  (chan, 2);
   BEGIN_RING(chan, RING_3D_(0x07c0), 4); /* ZCULL dimensions */
   OUT_RING  (chan, width);
   OUT_RING  (chan, height);
   OUT_RING  (chan, 1);
   OUT_RING  (chan, 0);
   BEGIN_RING(chan, RING_3D_(0x15fc), 2);
   OUT_RING  (chan, 0); /* bits 0xffff */
   OUT_RING  (chan, 0); /* bits 0xffff */
   BEGIN_RING(chan, RING_3D_(0x1958), 1);
   OUT_RING  (chan, 0); /* bits ~0 */
}
55
/* Validate the framebuffer state: bind all colour render targets and the
 * depth/stencil surface, track GPU read/write status of the backing
 * buffers, and derive the multisample mode from the bound surfaces.
 */
static void
nvc0_validate_fb(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   unsigned i;
   unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1;
   boolean serialize = FALSE;

   /* drop residency of the previously bound framebuffer resources */
   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_FRAME);

   /* 076543210 is octal: identity mapping of RT indices in RT_CONTROL,
    * low 4 bits hold the RT count.
    */
   BEGIN_RING(chan, RING_3D(RT_CONTROL), 1);
   OUT_RING  (chan, (076543210 << 4) | fb->nr_cbufs);
   BEGIN_RING(chan, RING_3D(SCREEN_SCISSOR_HORIZ), 2);
   OUT_RING  (chan, fb->width << 16);
   OUT_RING  (chan, fb->height << 16);

   /* 9 dwords and 2 relocations per colour buffer */
   MARK_RING(chan, 9 * fb->nr_cbufs, 2 * fb->nr_cbufs);

   for (i = 0; i < fb->nr_cbufs; ++i) {
      struct nv50_surface *sf = nv50_surface(fb->cbufs[i]);
      struct nv04_resource *res = nv04_resource(sf->base.texture);
      struct nouveau_bo *bo = res->bo;
      uint32_t offset = sf->offset + res->offset;

      BEGIN_RING(chan, RING_3D(RT_ADDRESS_HIGH(i)), 9);
      OUT_RELOCh(chan, res->bo, offset, res->domain | NOUVEAU_BO_RDWR);
      OUT_RELOCl(chan, res->bo, offset, res->domain | NOUVEAU_BO_RDWR);
      if (likely(nouveau_bo_tile_layout(bo))) {
         /* tiled miptree surface */
         struct nv50_miptree *mt = nv50_miptree(sf->base.texture);

         assert(sf->base.texture->target != PIPE_BUFFER);

         OUT_RING(chan, sf->width);
         OUT_RING(chan, sf->height);
         OUT_RING(chan, nvc0_format_table[sf->base.format].rt);
         OUT_RING(chan, (mt->layout_3d << 16) |
                  mt->level[sf->base.u.tex.level].tile_mode);
         /* layer count is relative to the first layer of the view */
         OUT_RING(chan, sf->base.u.tex.first_layer + sf->depth);
         OUT_RING(chan, mt->layer_stride >> 2);
         OUT_RING(chan, sf->base.u.tex.first_layer);

         ms_mode = mt->ms_mode;
      } else {
         /* linear surface (or plain buffer used as RT) */
         if (res->base.target == PIPE_BUFFER) {
            OUT_RING(chan, 262144);
            OUT_RING(chan, 1);
         } else {
            OUT_RING(chan, nv50_miptree(sf->base.texture)->level[0].pitch);
            OUT_RING(chan, sf->height);
         }
         OUT_RING(chan, nvc0_format_table[sf->base.format].rt);
         OUT_RING(chan, 1 << 12); /* linear layout flag */
         OUT_RING(chan, 1);
         OUT_RING(chan, 0);
         OUT_RING(chan, 0);

         nvc0_resource_fence(res, NOUVEAU_BO_WR);

         /* linear RTs cannot be combined with a depth buffer */
         assert(!fb->zsbuf);
      }

      /* switching a buffer from read to write access requires a flush */
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      /* only register for writing, otherwise we'd always serialize here */
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, res,
                               res->domain | NOUVEAU_BO_WR);
   }

   if (fb->zsbuf) {
      struct nv50_miptree *mt = nv50_miptree(fb->zsbuf->texture);
      struct nv50_surface *sf = nv50_surface(fb->zsbuf);
      struct nouveau_bo *bo = mt->base.bo;
      int unk = mt->base.base.target == PIPE_TEXTURE_2D;
      uint32_t offset = sf->offset;

      MARK_RING (chan, 12, 2);
      BEGIN_RING(chan, RING_3D(ZETA_ADDRESS_HIGH), 5);
      OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
      OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
      OUT_RING  (chan, nvc0_format_table[fb->zsbuf->format].rt);
      OUT_RING  (chan, mt->level[sf->base.u.tex.level].tile_mode);
      OUT_RING  (chan, mt->layer_stride >> 2);
      BEGIN_RING(chan, RING_3D(ZETA_ENABLE), 1);
      OUT_RING  (chan, 1);
      BEGIN_RING(chan, RING_3D(ZETA_HORIZ), 3);
      OUT_RING  (chan, sf->width);
      OUT_RING  (chan, sf->height);
      OUT_RING  (chan, (unk << 16) |
                 (sf->base.u.tex.first_layer + sf->depth));
      BEGIN_RING(chan, RING_3D(ZETA_BASE_LAYER), 1);
      OUT_RING  (chan, sf->base.u.tex.first_layer);

      ms_mode = mt->ms_mode;

      /* same read -> write transition rule as for the colour buffers */
      if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, &mt->base,
                               NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
   } else {
      BEGIN_RING(chan, RING_3D(ZETA_ENABLE), 1);
      OUT_RING  (chan, 0);
   }

   IMMED_RING(chan, RING_3D(MULTISAMPLE_MODE), ms_mode);

   if (serialize) {
      BEGIN_RING(chan, RING_3D(SERIALIZE), 1);
      OUT_RING  (chan, 0);
   }
}
173
174 static void
175 nvc0_validate_blend_colour(struct nvc0_context *nvc0)
176 {
177 struct nouveau_channel *chan = nvc0->screen->base.channel;
178
179 BEGIN_RING(chan, RING_3D(BLEND_COLOR(0)), 4);
180 OUT_RINGf (chan, nvc0->blend_colour.color[0]);
181 OUT_RINGf (chan, nvc0->blend_colour.color[1]);
182 OUT_RINGf (chan, nvc0->blend_colour.color[2]);
183 OUT_RINGf (chan, nvc0->blend_colour.color[3]);
184 }
185
186 static void
187 nvc0_validate_stencil_ref(struct nvc0_context *nvc0)
188 {
189 struct nouveau_channel *chan = nvc0->screen->base.channel;
190 const ubyte *ref = &nvc0->stencil_ref.ref_value[0];
191
192 IMMED_RING(chan, RING_3D(STENCIL_FRONT_FUNC_REF), ref[0]);
193 IMMED_RING(chan, RING_3D(STENCIL_BACK_FUNC_REF), ref[1]);
194 }
195
196 static void
197 nvc0_validate_stipple(struct nvc0_context *nvc0)
198 {
199 struct nouveau_channel *chan = nvc0->screen->base.channel;
200 unsigned i;
201
202 BEGIN_RING(chan, RING_3D(POLYGON_STIPPLE_PATTERN(0)), 32);
203 for (i = 0; i < 32; ++i)
204 OUT_RING(chan, util_bswap32(nvc0->stipple.stipple[i]));
205 }
206
207 static void
208 nvc0_validate_scissor(struct nvc0_context *nvc0)
209 {
210 struct nouveau_channel *chan = nvc0->screen->base.channel;
211 struct pipe_scissor_state *s = &nvc0->scissor;
212
213 if (!(nvc0->dirty & NVC0_NEW_SCISSOR) &&
214 nvc0->rast->pipe.scissor == nvc0->state.scissor)
215 return;
216 nvc0->state.scissor = nvc0->rast->pipe.scissor;
217
218 BEGIN_RING(chan, RING_3D(SCISSOR_HORIZ(0)), 2);
219 if (nvc0->rast->pipe.scissor) {
220 OUT_RING(chan, (s->maxx << 16) | s->minx);
221 OUT_RING(chan, (s->maxy << 16) | s->miny);
222 } else {
223 OUT_RING(chan, (0xffff << 16) | 0);
224 OUT_RING(chan, (0xffff << 16) | 0);
225 }
226 }
227
/* Upload the viewport transform and derive the hardware viewport
 * rectangle and depth range from the translate/scale values.
 */
static void
nvc0_validate_viewport(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct pipe_viewport_state *vp = &nvc0->viewport;
   int x, y, w, h;
   float zmin, zmax;

   BEGIN_RING(chan, RING_3D(VIEWPORT_TRANSLATE_X(0)), 3);
   OUT_RINGf (chan, vp->translate[0]);
   OUT_RINGf (chan, vp->translate[1]);
   OUT_RINGf (chan, vp->translate[2]);
   BEGIN_RING(chan, RING_3D(VIEWPORT_SCALE_X(0)), 3);
   OUT_RINGf (chan, vp->scale[0]);
   OUT_RINGf (chan, vp->scale[1]);
   OUT_RINGf (chan, vp->scale[2]);

   /* now set the viewport rectangle to viewport dimensions for clipping */

   /* left/top edge = translate - |scale|, clamped to 0;
    * width/height = right/bottom edge minus the clamped origin
    */
   x = util_iround(MAX2(0.0f, vp->translate[0] - fabsf(vp->scale[0])));
   y = util_iround(MAX2(0.0f, vp->translate[1] - fabsf(vp->scale[1])));
   w = util_iround(vp->translate[0] + fabsf(vp->scale[0])) - x;
   h = util_iround(vp->translate[1] + fabsf(vp->scale[1])) - y;

   /* depth range follows the same translate +/- |scale| construction */
   zmin = vp->translate[2] - fabsf(vp->scale[2]);
   zmax = vp->translate[2] + fabsf(vp->scale[2]);

   BEGIN_RING(chan, RING_3D(VIEWPORT_HORIZ(0)), 2);
   OUT_RING  (chan, (w << 16) | x);
   OUT_RING  (chan, (h << 16) | y);
   BEGIN_RING(chan, RING_3D(DEPTH_RANGE_NEAR(0)), 2);
   OUT_RINGf (chan, zmin);
   OUT_RINGf (chan, zmax);
}
262
/* Upload the user clip plane coefficients into the screen's uniform BO
 * (constant buffer area at offset 5 << 16) via the CB_POS push path.
 */
static INLINE void
nvc0_upload_uclip_planes(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nouveau_bo *bo = nvc0->screen->uniforms;

   /* 6 setup dwords + 4 floats per clip plane, 2 relocations */
   MARK_RING (chan, 6 + PIPE_MAX_CLIP_PLANES * 4, 2);
   BEGIN_RING(chan, RING_3D(CB_SIZE), 3);
   OUT_RING  (chan, 256);
   OUT_RELOCh(chan, bo, 5 << 16, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
   OUT_RELOCl(chan, bo, 5 << 16, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
   /* write offset 0 followed by all plane coefficients in one burst */
   BEGIN_RING_1I(chan, RING_3D(CB_POS), PIPE_MAX_CLIP_PLANES * 4 + 1);
   OUT_RING  (chan, 0);
   OUT_RINGp (chan, &nvc0->clip.ucp[0][0], PIPE_MAX_CLIP_PLANES * 4);
}
278
279 static INLINE void
280 nvc0_check_program_ucps(struct nvc0_context *nvc0,
281 struct nvc0_program *vp, uint8_t mask)
282 {
283 const unsigned n = util_logbase2(mask) + 1;
284
285 if (vp->vp.num_ucps >= n)
286 return;
287 nvc0_program_destroy(nvc0, vp);
288
289 vp->vp.num_ucps = n;
290 if (likely(vp == nvc0->vertprog))
291 nvc0_vertprog_validate(nvc0);
292 else
293 if (likely(vp == nvc0->gmtyprog))
294 nvc0_vertprog_validate(nvc0);
295 else
296 nvc0_tevlprog_validate(nvc0);
297 }
298
299 static void
300 nvc0_validate_clip(struct nvc0_context *nvc0)
301 {
302 struct nouveau_channel *chan = nvc0->screen->base.channel;
303 struct nvc0_program *vp;
304 uint8_t clip_enable = nvc0->rast->pipe.clip_plane_enable;
305
306 if (nvc0->dirty & NVC0_NEW_CLIP)
307 nvc0_upload_uclip_planes(nvc0);
308
309 vp = nvc0->gmtyprog;
310 if (!vp) {
311 vp = nvc0->tevlprog;
312 if (!vp)
313 vp = nvc0->vertprog;
314 }
315
316 if (clip_enable && vp->vp.num_ucps < PIPE_MAX_CLIP_PLANES)
317 nvc0_check_program_ucps(nvc0, vp, clip_enable);
318
319 clip_enable &= vp->vp.clip_enable;
320
321 if (nvc0->state.clip_enable != clip_enable) {
322 nvc0->state.clip_enable = clip_enable;
323 IMMED_RING(chan, RING_3D(CLIP_DISTANCE_ENABLE), clip_enable);
324 }
325 if (nvc0->state.clip_mode != vp->vp.clip_mode) {
326 nvc0->state.clip_mode = vp->vp.clip_mode;
327 BEGIN_RING(chan, RING_3D(CLIP_DISTANCE_MODE), 1);
328 OUT_RING (chan, vp->vp.clip_mode);
329 }
330 }
331
332 static void
333 nvc0_validate_blend(struct nvc0_context *nvc0)
334 {
335 struct nouveau_channel *chan = nvc0->screen->base.channel;
336
337 WAIT_RING(chan, nvc0->blend->size);
338 OUT_RINGp(chan, nvc0->blend->state, nvc0->blend->size);
339 }
340
341 static void
342 nvc0_validate_zsa(struct nvc0_context *nvc0)
343 {
344 struct nouveau_channel *chan = nvc0->screen->base.channel;
345
346 WAIT_RING(chan, nvc0->zsa->size);
347 OUT_RINGp(chan, nvc0->zsa->state, nvc0->zsa->size);
348 }
349
350 static void
351 nvc0_validate_rasterizer(struct nvc0_context *nvc0)
352 {
353 struct nouveau_channel *chan = nvc0->screen->base.channel;
354
355 WAIT_RING(chan, nvc0->rast->size);
356 OUT_RINGp(chan, nvc0->rast->state, nvc0->rast->size);
357 }
358
/* Rebind all dirty constant buffers for every shader stage.
 * Three paths per buffer:
 *  - unbound slot: emit a disable CB_BIND
 *  - user-memory constbuf 0: push the data inline into the screen's
 *    shared uniform BO (no migration)
 *  - regular resource: migrate to VRAM if necessary and bind its BO
 */
static void
nvc0_constbufs_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nouveau_bo *bo;
   unsigned s;

   for (s = 0; s < 5; ++s) {
      struct nv04_resource *res;
      int i;

      /* process one dirty bit (= one constbuf slot) per iteration */
      while (nvc0->constbuf_dirty[s]) {
         unsigned base = 0;
         unsigned words = 0;
         boolean rebind = TRUE;

         i = ffs(nvc0->constbuf_dirty[s]) - 1;
         nvc0->constbuf_dirty[s] &= ~(1 << i);

         res = nv04_resource(nvc0->constbuf[s][i]);
         if (!res) {
            /* slot unbound: disable the binding (valid bit 0) */
            BEGIN_RING(chan, RING_3D(CB_BIND(s)), 1);
            OUT_RING  (chan, (i << 4) | 0);
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
            continue;
         }

         if (!nouveau_resource_mapped_by_gpu(&res->base)) {
            if (i == 0 && (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)) {
               /* per-stage area of the shared uniform BO */
               base = s << 16;
               bo = nvc0->screen->uniforms;

               /* skip the CB_BIND if a large enough area is already bound */
               if (nvc0->state.uniform_buffer_bound[s] >= res->base.width0)
                  rebind = FALSE;
               else
                  nvc0->state.uniform_buffer_bound[s] =
                     align(res->base.width0, 0x100);

               /* push the whole user buffer inline below */
               words = res->base.width0 / 4;
            } else {
               /* make the buffer GPU-accessible first */
               nouveau_buffer_migrate(&nvc0->base, res, NOUVEAU_BO_VRAM);
               bo = res->bo;
               base = res->offset;
            }
         } else {
            bo = res->bo;
            base = res->offset;
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
         }

         /* the shared uniform BO is always resident; only track others */
         if (bo != nvc0->screen->uniforms)
            nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_CONSTANT, res,
                                     NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);

         if (rebind) {
            MARK_RING (chan, 4, 2);
            BEGIN_RING(chan, RING_3D(CB_SIZE), 3);
            OUT_RING  (chan, align(res->base.width0, 0x100));
            OUT_RELOCh(chan, bo, base, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
            OUT_RELOCl(chan, bo, base, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
            BEGIN_RING(chan, RING_3D(CB_BIND(s)), 1);
            OUT_RING  (chan, (i << 4) | 1);
         }

         /* inline upload of user-memory constbuf data */
         if (words)
            nvc0_cb_push(&nvc0->base,
                         bo, NOUVEAU_BO_VRAM, base, res->base.width0,
                         0, words, (const uint32_t *)res->data);
      }
   }
}
432
433 static void
434 nvc0_validate_sample_mask(struct nvc0_context *nvc0)
435 {
436 struct nouveau_channel *chan = nvc0->screen->base.channel;
437
438 unsigned mask[4] =
439 {
440 nvc0->sample_mask & 0xffff,
441 nvc0->sample_mask & 0xffff,
442 nvc0->sample_mask & 0xffff,
443 nvc0->sample_mask & 0xffff
444 };
445
446 BEGIN_RING(chan, RING_3D(MSAA_MASK(0)), 4);
447 OUT_RING (chan, mask[0]);
448 OUT_RING (chan, mask[1]);
449 OUT_RING (chan, mask[2]);
450 OUT_RING (chan, mask[3]);
451 BEGIN_RING(chan, RING_3D(SAMPLE_SHADING), 1);
452 OUT_RING (chan, 0x01);
453 }
454
455 static void
456 nvc0_validate_derived_1(struct nvc0_context *nvc0)
457 {
458 struct nouveau_channel *chan = nvc0->screen->base.channel;
459 boolean early_z;
460 boolean rasterizer_discard;
461
462 early_z = nvc0->fragprog->fp.early_z && !nvc0->zsa->pipe.alpha.enabled;
463
464 if (early_z != nvc0->state.early_z) {
465 nvc0->state.early_z = early_z;
466 IMMED_RING(chan, RING_3D(EARLY_FRAGMENT_TESTS), early_z);
467 }
468
469 rasterizer_discard = (!nvc0->fragprog || !nvc0->fragprog->hdr[18]) &&
470 !nvc0->zsa->pipe.depth.enabled && !nvc0->zsa->pipe.stencil[0].enabled;
471 rasterizer_discard = rasterizer_discard ||
472 nvc0->rast->pipe.rasterizer_discard;
473
474 if (rasterizer_discard != nvc0->state.rasterizer_discard) {
475 nvc0->state.rasterizer_discard = rasterizer_discard;
476 IMMED_RING(chan, RING_3D(RASTERIZE_ENABLE), !rasterizer_discard);
477 }
478 }
479
/* Make @ctx_to the screen's current context: inherit the hardware state
 * shadow from the previously active context and mark all state dirty,
 * except for state objects the new context has none of (validating those
 * would dereference NULL pointers).
 */
static void
nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
{
   struct nvc0_context *ctx_from = ctx_to->screen->cur_ctx;

   if (ctx_from)
      ctx_to->state = ctx_from->state;

   ctx_to->dirty = ~0;

   if (!ctx_to->vertex)
      ctx_to->dirty &= ~(NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS);

   if (!ctx_to->vertprog)
      ctx_to->dirty &= ~NVC0_NEW_VERTPROG;
   if (!ctx_to->fragprog)
      ctx_to->dirty &= ~NVC0_NEW_FRAGPROG;

   if (!ctx_to->blend)
      ctx_to->dirty &= ~NVC0_NEW_BLEND;
   if (!ctx_to->rast)
      ctx_to->dirty &= ~(NVC0_NEW_RASTERIZER | NVC0_NEW_SCISSOR);
   if (!ctx_to->zsa)
      ctx_to->dirty &= ~NVC0_NEW_ZSA;

   ctx_to->screen->cur_ctx = ctx_to;
}
507
/* Table of state validation functions; nvc0_state_validate() runs the
 * entries top to bottom and calls func whenever any of its state bits
 * is dirty.
 */
static struct state_validate {
    void (*func)(struct nvc0_context *);
    uint32_t states;
} validate_list[] = {
    { nvc0_validate_fb,            NVC0_NEW_FRAMEBUFFER },
    { nvc0_validate_blend,         NVC0_NEW_BLEND },
    { nvc0_validate_zsa,           NVC0_NEW_ZSA },
    { nvc0_validate_sample_mask,   NVC0_NEW_SAMPLE_MASK },
    { nvc0_validate_rasterizer,    NVC0_NEW_RASTERIZER },
    { nvc0_validate_blend_colour,  NVC0_NEW_BLEND_COLOUR },
    { nvc0_validate_stencil_ref,   NVC0_NEW_STENCIL_REF },
    { nvc0_validate_stipple,       NVC0_NEW_STIPPLE },
    { nvc0_validate_scissor,       NVC0_NEW_SCISSOR | NVC0_NEW_RASTERIZER },
    { nvc0_validate_viewport,      NVC0_NEW_VIEWPORT },
    { nvc0_vertprog_validate,      NVC0_NEW_VERTPROG },
    { nvc0_tctlprog_validate,      NVC0_NEW_TCTLPROG },
    { nvc0_tevlprog_validate,      NVC0_NEW_TEVLPROG },
    { nvc0_gmtyprog_validate,      NVC0_NEW_GMTYPROG },
    { nvc0_fragprog_validate,      NVC0_NEW_FRAGPROG },
    { nvc0_validate_derived_1,     NVC0_NEW_FRAGPROG | NVC0_NEW_ZSA |
                                   NVC0_NEW_RASTERIZER },
    { nvc0_validate_clip,          NVC0_NEW_CLIP | NVC0_NEW_RASTERIZER |
                                   NVC0_NEW_VERTPROG |
                                   NVC0_NEW_TEVLPROG |
                                   NVC0_NEW_GMTYPROG },
    { nvc0_constbufs_validate,     NVC0_NEW_CONSTBUF },
    { nvc0_validate_textures,      NVC0_NEW_TEXTURES },
    { nvc0_validate_samplers,      NVC0_NEW_SAMPLERS },
    { nvc0_vertex_arrays_validate, NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS },
    { nvc0_tfb_validate,           NVC0_NEW_TFB_TARGETS | NVC0_NEW_GMTYPROG }
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
540
541 boolean
542 nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words)
543 {
544 uint32_t state_mask;
545 unsigned i;
546
547 if (nvc0->screen->cur_ctx != nvc0)
548 nvc0_switch_pipe_context(nvc0);
549
550 state_mask = nvc0->dirty & mask;
551
552 if (state_mask) {
553 for (i = 0; i < validate_list_len; ++i) {
554 struct state_validate *validate = &validate_list[i];
555
556 if (state_mask & validate->states)
557 validate->func(nvc0);
558 }
559 nvc0->dirty &= ~state_mask;
560 }
561
562 MARK_RING(nvc0->screen->base.channel, words, 0);
563
564 nvc0_bufctx_emit_relocs(nvc0);
565
566 return TRUE;
567 }