/*
 * gallium: separate out floating-point CAPs into its own enum
 * [mesa.git] src/gallium/drivers/nvc0/nvc0_state_validate.c
 */
1
2 #include "util/u_math.h"
3
4 #include "nvc0_context.h"
5
/* Set up the ZCULL (hierarchical depth-culling) region for the current
 * depth/stencil surface.  Two ZCULL storage regions are carved out of the
 * same BO as the miptree, starting at total_size aligned up to 128 KiB.
 * NOTE(review): most methods here are raw addresses (0x1590, 0x07e8, ...)
 * whose exact semantics are only partially reverse-engineered -- the bit
 * comments record what is known; confirm against rnndb before changing.
 */
static void
nvc0_validate_zcull(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   struct nv50_surface *sf = nv50_surface(fb->zsbuf);
   struct nv50_miptree *mt = nv50_miptree(sf->base.texture);
   struct nouveau_bo *bo = mt->base.bo;
   uint32_t size;
   /* ZCULL data starts right after the miptree, aligned to 128 KiB */
   uint32_t offset = align(mt->total_size, 1 << 17);
   unsigned width, height;

   /* only a single-layer 2D depth surface is supported here */
   assert(mt->base.base.depth0 == 1 && mt->base.base.array_size < 2);

   size = mt->total_size * 2;

   /* hardware wants height aligned to 32 and width rounded up to a
    * multiple of 224 (not a power of two, so align() can't be used)
    */
   height = align(fb->height, 32);
   width = fb->width % 224;
   if (width)
      width = fb->width + (224 - width);
   else
      width = fb->width;

   MARK_RING (chan, 23, 4);
   BEGIN_RING(chan, RING_3D_(0x1590), 1); /* ZCULL_REGION_INDEX (bits 0x3f) */
   OUT_RING  (chan, 0);
   BEGIN_RING(chan, RING_3D_(0x07e8), 2); /* ZCULL_ADDRESS_A_HIGH */
   OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   offset += 1 << 17; /* region B follows region A by 128 KiB */
   BEGIN_RING(chan, RING_3D_(0x07f0), 2); /* ZCULL_ADDRESS_B_HIGH */
   OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
   BEGIN_RING(chan, RING_3D_(0x07e0), 2); /* presumably region size, split
                                           * into low/high 16-bit words --
                                           * TODO confirm */
   OUT_RING  (chan, size);
   OUT_RING  (chan, size >> 16);
   BEGIN_RING(chan, RING_3D_(0x15c8), 1); /* bits 0x3 */
   OUT_RING  (chan, 2);
   BEGIN_RING(chan, RING_3D_(0x07c0), 4); /* ZCULL dimensions */
   OUT_RING  (chan, width);
   OUT_RING  (chan, height);
   OUT_RING  (chan, 1);
   OUT_RING  (chan, 0);
   BEGIN_RING(chan, RING_3D_(0x15fc), 2);
   OUT_RING  (chan, 0); /* bits 0xffff */
   OUT_RING  (chan, 0); /* bits 0xffff */
   BEGIN_RING(chan, RING_3D_(0x1958), 1);
   OUT_RING  (chan, 0); /* bits ~0 */
}
55
/* Validate the framebuffer state: bind every colour render target and the
 * depth/stencil surface (or disable ZETA when there is none), program the
 * screen scissor, and derive the multisample mode from the bound surfaces.
 * Also tracks GPU read/write status on the resources and emits a SERIALIZE
 * when a surface that was being read becomes a render target.
 */
static void
nvc0_validate_fb(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   unsigned i;
   unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1; /* default: no MSAA */
   boolean serialize = FALSE;

   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_FRAME);

   /* NOTE: 076543210 is OCTAL on purpose -- 3 bits per slot, routing each
    * of the 8 RT outputs to the RT with the same index (identity map).
    */
   BEGIN_RING(chan, RING_3D(RT_CONTROL), 1);
   OUT_RING  (chan, (076543210 << 4) | fb->nr_cbufs);
   BEGIN_RING(chan, RING_3D(SCREEN_SCISSOR_HORIZ), 2);
   OUT_RING  (chan, fb->width << 16);
   OUT_RING  (chan, fb->height << 16);

   /* 9 words and 2 relocations per colour buffer */
   MARK_RING(chan, 9 * fb->nr_cbufs, 2 * fb->nr_cbufs);

   for (i = 0; i < fb->nr_cbufs; ++i) {
      struct nv50_surface *sf = nv50_surface(fb->cbufs[i]);
      struct nv04_resource *res = nv04_resource(sf->base.texture);
      struct nouveau_bo *bo = res->bo;
      uint32_t offset = sf->offset + res->offset;

      BEGIN_RING(chan, RING_3D(RT_ADDRESS_HIGH(i)), 9);
      OUT_RELOCh(chan, res->bo, offset, res->domain | NOUVEAU_BO_RDWR);
      OUT_RELOCl(chan, res->bo, offset, res->domain | NOUVEAU_BO_RDWR);
      if (likely(nouveau_bo_tile_layout(bo))) {
         /* tiled surface: program dimensions, tile mode and layer info */
         struct nv50_miptree *mt = nv50_miptree(sf->base.texture);

         assert(sf->base.texture->target != PIPE_BUFFER);

         OUT_RING(chan, sf->width);
         OUT_RING(chan, sf->height);
         OUT_RING(chan, nvc0_format_table[sf->base.format].rt);
         OUT_RING(chan, (mt->layout_3d << 16) |
                  mt->level[sf->base.u.tex.level].tile_mode);
         OUT_RING(chan, sf->base.u.tex.first_layer + sf->depth);
         OUT_RING(chan, mt->layer_stride >> 2);
         OUT_RING(chan, sf->base.u.tex.first_layer);

         ms_mode = mt->ms_mode;
      } else {
         /* linear (pitch) layout: buffers and linear textures */
         if (res->base.target == PIPE_BUFFER) {
            /* presumably a fixed pitch/width for buffer RTs -- verify
             * against the hardware docs before changing */
            OUT_RING(chan, 262144);
            OUT_RING(chan, 1);
         } else {
            OUT_RING(chan, nv50_miptree(sf->base.texture)->level[0].pitch);
            OUT_RING(chan, sf->height);
         }
         OUT_RING(chan, nvc0_format_table[sf->base.format].rt);
         OUT_RING(chan, 1 << 12); /* NOTE(review): looks like the "linear
                                   * layout" flag in the tile-mode word --
                                   * confirm */
         OUT_RING(chan, 1);
         OUT_RING(chan, 0);
         OUT_RING(chan, 0);

         nvc0_resource_fence(res, NOUVEAU_BO_WR);

         /* depth testing cannot be combined with linear colour RTs */
         assert(!fb->zsbuf);
      }

      /* switching a resource from read to write requires serialization */
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      /* only register for writing, otherwise we'd always serialize here */
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, res,
                               res->domain | NOUVEAU_BO_WR);
   }

   if (fb->zsbuf) {
      struct nv50_miptree *mt = nv50_miptree(fb->zsbuf->texture);
      struct nv50_surface *sf = nv50_surface(fb->zsbuf);
      struct nouveau_bo *bo = mt->base.bo;
      /* NOTE(review): purpose of this flag for 2D targets is unclear */
      int unk = mt->base.base.target == PIPE_TEXTURE_2D;
      uint32_t offset = sf->offset;

      MARK_RING (chan, 12, 2);
      BEGIN_RING(chan, RING_3D(ZETA_ADDRESS_HIGH), 5);
      OUT_RELOCh(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
      OUT_RELOCl(chan, bo, offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
      OUT_RING  (chan, nvc0_format_table[fb->zsbuf->format].rt);
      OUT_RING  (chan, mt->level[sf->base.u.tex.level].tile_mode);
      OUT_RING  (chan, mt->layer_stride >> 2);
      BEGIN_RING(chan, RING_3D(ZETA_ENABLE), 1);
      OUT_RING  (chan, 1);
      BEGIN_RING(chan, RING_3D(ZETA_HORIZ), 3);
      OUT_RING  (chan, sf->width);
      OUT_RING  (chan, sf->height);
      OUT_RING  (chan, (unk << 16) |
                 (sf->base.u.tex.first_layer + sf->depth));
      BEGIN_RING(chan, RING_3D(ZETA_BASE_LAYER), 1);
      OUT_RING  (chan, sf->base.u.tex.first_layer);

      ms_mode = mt->ms_mode;

      /* same read->write transition tracking as for colour buffers */
      if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_FRAME, &mt->base,
                               NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
   } else {
      BEGIN_RING(chan, RING_3D(ZETA_ENABLE), 1);
      OUT_RING  (chan, 0);
   }

   IMMED_RING(chan, RING_3D(MULTISAMPLE_MODE), ms_mode);

   if (serialize) {
      BEGIN_RING(chan, RING_3D(SERIALIZE), 1);
      OUT_RING  (chan, 0);
   }
}
173
174 static void
175 nvc0_validate_blend_colour(struct nvc0_context *nvc0)
176 {
177 struct nouveau_channel *chan = nvc0->screen->base.channel;
178
179 BEGIN_RING(chan, RING_3D(BLEND_COLOR(0)), 4);
180 OUT_RINGf (chan, nvc0->blend_colour.color[0]);
181 OUT_RINGf (chan, nvc0->blend_colour.color[1]);
182 OUT_RINGf (chan, nvc0->blend_colour.color[2]);
183 OUT_RINGf (chan, nvc0->blend_colour.color[3]);
184 }
185
186 static void
187 nvc0_validate_stencil_ref(struct nvc0_context *nvc0)
188 {
189 struct nouveau_channel *chan = nvc0->screen->base.channel;
190 const ubyte *ref = &nvc0->stencil_ref.ref_value[0];
191
192 IMMED_RING(chan, RING_3D(STENCIL_FRONT_FUNC_REF), ref[0]);
193 IMMED_RING(chan, RING_3D(STENCIL_BACK_FUNC_REF), ref[1]);
194 }
195
196 static void
197 nvc0_validate_stipple(struct nvc0_context *nvc0)
198 {
199 struct nouveau_channel *chan = nvc0->screen->base.channel;
200 unsigned i;
201
202 BEGIN_RING(chan, RING_3D(POLYGON_STIPPLE_PATTERN(0)), 32);
203 for (i = 0; i < 32; ++i)
204 OUT_RING(chan, util_bswap32(nvc0->stipple.stipple[i]));
205 }
206
207 static void
208 nvc0_validate_scissor(struct nvc0_context *nvc0)
209 {
210 struct nouveau_channel *chan = nvc0->screen->base.channel;
211 struct pipe_scissor_state *s = &nvc0->scissor;
212
213 if (!(nvc0->dirty & NVC0_NEW_SCISSOR) &&
214 nvc0->rast->pipe.scissor == nvc0->state.scissor)
215 return;
216 nvc0->state.scissor = nvc0->rast->pipe.scissor;
217
218 BEGIN_RING(chan, RING_3D(SCISSOR_HORIZ(0)), 2);
219 if (nvc0->rast->pipe.scissor) {
220 OUT_RING(chan, (s->maxx << 16) | s->minx);
221 OUT_RING(chan, (s->maxy << 16) | s->miny);
222 } else {
223 OUT_RING(chan, (0xffff << 16) | 0);
224 OUT_RING(chan, (0xffff << 16) | 0);
225 }
226 }
227
228 static void
229 nvc0_validate_viewport(struct nvc0_context *nvc0)
230 {
231 struct nouveau_channel *chan = nvc0->screen->base.channel;
232 struct pipe_viewport_state *vp = &nvc0->viewport;
233 int x, y, w, h;
234 float zmin, zmax;
235
236 BEGIN_RING(chan, RING_3D(VIEWPORT_TRANSLATE_X(0)), 3);
237 OUT_RINGf (chan, vp->translate[0]);
238 OUT_RINGf (chan, vp->translate[1]);
239 OUT_RINGf (chan, vp->translate[2]);
240 BEGIN_RING(chan, RING_3D(VIEWPORT_SCALE_X(0)), 3);
241 OUT_RINGf (chan, vp->scale[0]);
242 OUT_RINGf (chan, vp->scale[1]);
243 OUT_RINGf (chan, vp->scale[2]);
244
245 /* now set the viewport rectangle to viewport dimensions for clipping */
246
247 x = util_iround(MAX2(0.0f, vp->translate[0] - fabsf(vp->scale[0])));
248 y = util_iround(MAX2(0.0f, vp->translate[1] - fabsf(vp->scale[1])));
249 w = util_iround(vp->translate[0] + fabsf(vp->scale[0])) - x;
250 h = util_iround(vp->translate[1] + fabsf(vp->scale[1])) - y;
251
252 zmin = vp->translate[2] - fabsf(vp->scale[2]);
253 zmax = vp->translate[2] + fabsf(vp->scale[2]);
254
255 BEGIN_RING(chan, RING_3D(VIEWPORT_HORIZ(0)), 2);
256 OUT_RING (chan, (w << 16) | x);
257 OUT_RING (chan, (h << 16) | y);
258 BEGIN_RING(chan, RING_3D(DEPTH_RANGE_NEAR(0)), 2);
259 OUT_RINGf (chan, zmin);
260 OUT_RINGf (chan, zmax);
261 }
262
/* Program view-volume clip control (depth clamp) and upload the user clip
 * planes, then enable the corresponding clip distances if the vertex
 * program writes them.
 */
static void
nvc0_validate_clip(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   uint32_t clip;

   if (nvc0->clip.depth_clamp) {
      clip =
         NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1 |
         NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR |
         NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR |
         NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2;
   } else {
      clip = NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1;
   }

   BEGIN_RING(chan, RING_3D(VIEW_VOLUME_CLIP_CTRL), 1);
   OUT_RING  (chan, clip);

   if (nvc0->clip.nr) {
      struct nouveau_bo *bo = nvc0->screen->uniforms;

      /* upload the clip planes (4 floats each) into the screen's uniform
       * BO at offset 5 << 16 -- presumably the auxiliary constant buffer
       * shaders read UCPs from; verify against the shader backend
       */
      MARK_RING (chan, 6 + nvc0->clip.nr * 4, 2);
      BEGIN_RING(chan, RING_3D(CB_SIZE), 3);
      OUT_RING  (chan, 256);
      OUT_RELOCh(chan, bo, 5 << 16, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
      OUT_RELOCl(chan, bo, 5 << 16, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
      BEGIN_RING_1I(chan, RING_3D(CB_POS), nvc0->clip.nr * 4 + 1);
      OUT_RING  (chan, 0);
      OUT_RINGp (chan, &nvc0->clip.ucp[0][0], nvc0->clip.nr * 4);
   }

   /* only enable clip distances when the vertex program emits them;
    * NOTE(review): the enable mask comes from clip.nr, not num_ucps --
    * confirm these are kept in sync by the shader state code
    */
   if (nvc0->vertprog->vp.num_ucps) {
      nvc0->state.clip_mode = 0;
      nvc0->state.clip_enable = (1 << nvc0->clip.nr) - 1;
      IMMED_RING(chan, RING_3D(CLIP_DISTANCE_ENABLE), nvc0->state.clip_enable);
      IMMED_RING(chan, RING_3D(CLIP_DISTANCE_MODE), 0);
   }
}
302
303 static void
304 nvc0_validate_blend(struct nvc0_context *nvc0)
305 {
306 struct nouveau_channel *chan = nvc0->screen->base.channel;
307
308 WAIT_RING(chan, nvc0->blend->size);
309 OUT_RINGp(chan, nvc0->blend->state, nvc0->blend->size);
310 }
311
312 static void
313 nvc0_validate_zsa(struct nvc0_context *nvc0)
314 {
315 struct nouveau_channel *chan = nvc0->screen->base.channel;
316
317 WAIT_RING(chan, nvc0->zsa->size);
318 OUT_RINGp(chan, nvc0->zsa->state, nvc0->zsa->size);
319 }
320
321 static void
322 nvc0_validate_rasterizer(struct nvc0_context *nvc0)
323 {
324 struct nouveau_channel *chan = nvc0->screen->base.channel;
325
326 WAIT_RING(chan, nvc0->rast->size);
327 OUT_RINGp(chan, nvc0->rast->state, nvc0->rast->size);
328 }
329
/* (Re)bind all dirty constant buffers for the 5 shader stages.
 * For each dirty binding slot: unbind if no buffer is set; otherwise bind
 * the buffer's BO, migrating or pushing the data as needed.  Slot 0 with
 * user memory goes through the screen's shared uniform BO and is pushed
 * inline via nvc0_cb_push.
 */
static void
nvc0_constbufs_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nouveau_bo *bo;
   unsigned s;

   for (s = 0; s < 5; ++s) {
      struct nv04_resource *res;
      int i;

      /* process dirty slots lowest-bit first until none remain */
      while (nvc0->constbuf_dirty[s]) {
         unsigned base = 0;
         unsigned words = 0; /* non-zero => push data inline below */
         boolean rebind = TRUE;

         i = ffs(nvc0->constbuf_dirty[s]) - 1;
         nvc0->constbuf_dirty[s] &= ~(1 << i);

         res = nv04_resource(nvc0->constbuf[s][i]);
         if (!res) {
            /* no buffer: unbind this slot */
            BEGIN_RING(chan, RING_3D(CB_BIND(s)), 1);
            OUT_RING  (chan, (i << 4) | 0);
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
            continue;
         }

         if (!nouveau_resource_mapped_by_gpu(&res->base)) {
            if (i == 0 && (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)) {
               /* user uniforms: each stage gets a 64 KiB window in the
                * screen's shared uniform BO */
               base = s << 16;
               bo = nvc0->screen->uniforms;

               /* skip the CB_BIND if a large-enough window is already
                * bound for this stage */
               if (nvc0->state.uniform_buffer_bound[s] >= res->base.width0)
                  rebind = FALSE;
               else
                  nvc0->state.uniform_buffer_bound[s] =
                     align(res->base.width0, 0x100);

               words = res->base.width0 / 4;
            } else {
               /* plain buffer not yet GPU-visible: migrate it to VRAM */
               nouveau_buffer_migrate(&nvc0->base, res, NOUVEAU_BO_VRAM);
               bo = res->bo;
               base = res->offset;
            }
         } else {
            bo = res->bo;
            base = res->offset;
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
         }

         /* the shared uniform BO is always resident; only track others */
         if (bo != nvc0->screen->uniforms)
            nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_CONSTANT, res,
                                     NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);

         if (rebind) {
            MARK_RING (chan, 4, 2);
            BEGIN_RING(chan, RING_3D(CB_SIZE), 3);
            OUT_RING  (chan, align(res->base.width0, 0x100));
            OUT_RELOCh(chan, bo, base, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
            OUT_RELOCl(chan, bo, base, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
            BEGIN_RING(chan, RING_3D(CB_BIND(s)), 1);
            OUT_RING  (chan, (i << 4) | 1);
         }

         /* push user-memory uniform data inline */
         if (words)
            nvc0_cb_push(&nvc0->base,
                         bo, NOUVEAU_BO_VRAM, base, res->base.width0,
                         0, words, (const uint32_t *)res->data);
      }
   }
}
403
404 static void
405 nvc0_validate_sample_mask(struct nvc0_context *nvc0)
406 {
407 struct nouveau_channel *chan = nvc0->screen->base.channel;
408
409 unsigned mask[4] =
410 {
411 nvc0->sample_mask & 0xffff,
412 nvc0->sample_mask & 0xffff,
413 nvc0->sample_mask & 0xffff,
414 nvc0->sample_mask & 0xffff
415 };
416
417 BEGIN_RING(chan, RING_3D(MSAA_MASK(0)), 4);
418 OUT_RING (chan, mask[0]);
419 OUT_RING (chan, mask[1]);
420 OUT_RING (chan, mask[2]);
421 OUT_RING (chan, mask[3]);
422 BEGIN_RING(chan, RING_3D(SAMPLE_SHADING), 1);
423 OUT_RING (chan, 0x01);
424 }
425
426 static void
427 nvc0_validate_derived_1(struct nvc0_context *nvc0)
428 {
429 struct nouveau_channel *chan = nvc0->screen->base.channel;
430 boolean early_z;
431
432 early_z = nvc0->fragprog->fp.early_z && !nvc0->zsa->pipe.alpha.enabled;
433
434 if (early_z != nvc0->state.early_z) {
435 nvc0->state.early_z = early_z;
436 IMMED_RING(chan, RING_3D(EARLY_FRAGMENT_TESTS), early_z);
437 }
438 }
439
440 static void
441 nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
442 {
443 struct nvc0_context *ctx_from = ctx_to->screen->cur_ctx;
444
445 if (ctx_from)
446 ctx_to->state = ctx_from->state;
447
448 ctx_to->dirty = ~0;
449
450 if (!ctx_to->vertex)
451 ctx_to->dirty &= ~(NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS);
452
453 if (!ctx_to->vertprog)
454 ctx_to->dirty &= ~NVC0_NEW_VERTPROG;
455 if (!ctx_to->fragprog)
456 ctx_to->dirty &= ~NVC0_NEW_FRAGPROG;
457
458 if (!ctx_to->blend)
459 ctx_to->dirty &= ~NVC0_NEW_BLEND;
460 if (!ctx_to->rast)
461 ctx_to->dirty &= ~(NVC0_NEW_RASTERIZER | NVC0_NEW_SCISSOR);
462 if (!ctx_to->zsa)
463 ctx_to->dirty &= ~NVC0_NEW_ZSA;
464
465 ctx_to->screen->cur_ctx = ctx_to;
466 }
467
/* Table of state validation functions; an entry runs when any of its
 * dirty bits in 'states' is set.  NOTE(review): the ordering appears
 * deliberate (framebuffer first, derived state after the programs it
 * depends on) -- preserve it when adding entries.
 */
static struct state_validate {
   void (*func)(struct nvc0_context *);
   uint32_t states;
} validate_list[] = {
    { nvc0_validate_fb,            NVC0_NEW_FRAMEBUFFER },
    { nvc0_validate_blend,         NVC0_NEW_BLEND },
    { nvc0_validate_zsa,           NVC0_NEW_ZSA },
    { nvc0_validate_sample_mask,   NVC0_NEW_SAMPLE_MASK },
    { nvc0_validate_rasterizer,    NVC0_NEW_RASTERIZER },
    { nvc0_validate_blend_colour,  NVC0_NEW_BLEND_COLOUR },
    { nvc0_validate_stencil_ref,   NVC0_NEW_STENCIL_REF },
    { nvc0_validate_stipple,       NVC0_NEW_STIPPLE },
    { nvc0_validate_scissor,       NVC0_NEW_SCISSOR | NVC0_NEW_RASTERIZER },
    { nvc0_validate_viewport,      NVC0_NEW_VIEWPORT },
    { nvc0_vertprog_validate,      NVC0_NEW_VERTPROG },
    { nvc0_tctlprog_validate,      NVC0_NEW_TCTLPROG },
    { nvc0_tevlprog_validate,      NVC0_NEW_TEVLPROG },
    { nvc0_gmtyprog_validate,      NVC0_NEW_GMTYPROG },
    { nvc0_fragprog_validate,      NVC0_NEW_FRAGPROG },
    { nvc0_validate_derived_1,     NVC0_NEW_FRAGPROG | NVC0_NEW_ZSA },
    { nvc0_validate_clip,          NVC0_NEW_CLIP },
    { nvc0_constbufs_validate,     NVC0_NEW_CONSTBUF },
    { nvc0_validate_textures,      NVC0_NEW_TEXTURES },
    { nvc0_validate_samplers,      NVC0_NEW_SAMPLERS },
    { nvc0_vertex_arrays_validate, NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS },
    { nvc0_tfb_validate,           NVC0_NEW_TFB | NVC0_NEW_TFB_BUFFERS }
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
496
497 boolean
498 nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words)
499 {
500 uint32_t state_mask;
501 unsigned i;
502
503 if (nvc0->screen->cur_ctx != nvc0)
504 nvc0_switch_pipe_context(nvc0);
505
506 state_mask = nvc0->dirty & mask;
507
508 if (state_mask) {
509 for (i = 0; i < validate_list_len; ++i) {
510 struct state_validate *validate = &validate_list[i];
511
512 if (state_mask & validate->states)
513 validate->func(nvc0);
514 }
515 nvc0->dirty &= ~state_mask;
516 }
517
518 MARK_RING(nvc0->screen->base.channel, words, 0);
519
520 nvc0_bufctx_emit_relocs(nvc0);
521
522 return TRUE;
523 }