/*
 * nvc0: avoid crash on updating RASTERIZE_ENABLE state
 *
 * src/gallium/drivers/nvc0/nvc0_state_validate.c
 */
1
2 #include "util/u_math.h"
3
4 #include "nvc0_context.h"
5
#if 0
/* Experimental ZCULL (hierarchical depth culling) setup -- currently
 * disabled.  Places the ZCULL region right after the depth miptree in the
 * same BO (aligned to 128 KiB) and programs the region address, limit and
 * dimensions.  Width is rounded up to a multiple of 224 and height to a
 * multiple of 32.
 *
 * NOTE(review): only handles non-array 2D depth surfaces (see assert);
 * methods 0x07e0 / 0x15c8 are undocumented -- verify against rnndb before
 * enabling this path.
 */
static void
nvc0_validate_zcull(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   struct nv50_surface *sf = nv50_surface(fb->zsbuf);
   struct nv50_miptree *mt = nv50_miptree(sf->base.texture);
   struct nouveau_bo *bo = mt->base.bo;
   uint32_t size;
   /* ZCULL data starts at the next 128 KiB boundary after the miptree. */
   uint32_t offset = align(mt->total_size, 1 << 17);
   unsigned width, height;

   assert(mt->base.base.depth0 == 1 && mt->base.base.array_size < 2);

   size = mt->total_size * 2;

   height = align(fb->height, 32);
   /* Round width up to a multiple of 224. */
   width = fb->width % 224;
   if (width)
      width = fb->width + (224 - width);
   else
      width = fb->width;

   BEGIN_NVC0(push, NVC0_3D(ZCULL_REGION), 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, bo->offset + offset);
   PUSH_DATA (push, bo->offset + offset);
   offset += 1 << 17;
   BEGIN_NVC0(push, NVC0_3D(ZCULL_LIMIT_HIGH), 2);
   PUSH_DATAh(push, bo->offset + offset);
   PUSH_DATA (push, bo->offset + offset);
   BEGIN_NVC0(push, SUBC_3D(0x07e0), 2);
   PUSH_DATA (push, size);
   PUSH_DATA (push, size >> 16);
   BEGIN_NVC0(push, SUBC_3D(0x15c8), 1); /* bits 0x3 */
   PUSH_DATA (push, 2);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_WIDTH), 4);
   PUSH_DATA (push, width);
   PUSH_DATA (push, height);
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_WINDOW_OFFSET_X), 2);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_INVALIDATE), 1);
   PUSH_DATA (push, 0);
}
#endif
56
/* Validate framebuffer state: program the colour render targets, the
 * depth/stencil surface, the screen scissor and the derived multisample
 * mode, and register all bound surfaces with the 3D bufctx (write-only)
 * for fencing.
 */
static void
nvc0_validate_fb(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   unsigned i;
   unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1;
   boolean serialize = FALSE;

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);

   /* 076543210 (octal) is the identity RT index map; low nibble = count. */
   BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
   PUSH_DATA (push, (076543210 << 4) | fb->nr_cbufs);
   BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
   PUSH_DATA (push, fb->width << 16);
   PUSH_DATA (push, fb->height << 16);

   for (i = 0; i < fb->nr_cbufs; ++i) {
      struct nv50_surface *sf = nv50_surface(fb->cbufs[i]);
      struct nv04_resource *res = nv04_resource(sf->base.texture);
      struct nouveau_bo *bo = res->bo;

      BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(i)), 9);
      PUSH_DATAh(push, res->address + sf->offset);
      PUSH_DATA (push, res->address + sf->offset);
      if (likely(nouveau_bo_memtype(bo))) {
         /* Tiled (miptree) colour target. */
         struct nv50_miptree *mt = nv50_miptree(sf->base.texture);

         assert(sf->base.texture->target != PIPE_BUFFER);

         PUSH_DATA(push, sf->width);
         PUSH_DATA(push, sf->height);
         PUSH_DATA(push, nvc0_format_table[sf->base.format].rt);
         PUSH_DATA(push, (mt->layout_3d << 16) |
                   mt->level[sf->base.u.tex.level].tile_mode);
         PUSH_DATA(push, sf->base.u.tex.first_layer + sf->depth);
         PUSH_DATA(push, mt->layer_stride >> 2);
         PUSH_DATA(push, sf->base.u.tex.first_layer);

         ms_mode = mt->ms_mode;
      } else {
         /* Linear surface or raw buffer bound as render target. */
         if (res->base.target == PIPE_BUFFER) {
            /* NOTE(review): 262144 looks like a fixed pitch/width for
             * buffer RTs -- confirm against the class docs. */
            PUSH_DATA(push, 262144);
            PUSH_DATA(push, 1);
         } else {
            PUSH_DATA(push, nv50_miptree(sf->base.texture)->level[0].pitch);
            PUSH_DATA(push, sf->height);
         }
         PUSH_DATA(push, nvc0_format_table[sf->base.format].rt);
         /* presumably the linear-layout flag -- no tile mode */
         PUSH_DATA(push, 1 << 12);
         PUSH_DATA(push, 1);
         PUSH_DATA(push, 0);
         PUSH_DATA(push, 0);

         nvc0_resource_fence(res, NOUVEAU_BO_WR);

         /* Linear colour together with a zeta buffer is not supported. */
         assert(!fb->zsbuf);
      }

      /* Force ordering if the surface was previously read by the GPU. */
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      /* only register for writing, otherwise we'd always serialize here */
      BCTX_REFN(nvc0->bufctx_3d, FB, res, WR);
   }

   if (fb->zsbuf) {
      struct nv50_miptree *mt = nv50_miptree(fb->zsbuf->texture);
      struct nv50_surface *sf = nv50_surface(fb->zsbuf);
      int unk = mt->base.base.target == PIPE_TEXTURE_2D;

      BEGIN_NVC0(push, NVC0_3D(ZETA_ADDRESS_HIGH), 5);
      PUSH_DATAh(push, mt->base.address + sf->offset);
      PUSH_DATA (push, mt->base.address + sf->offset);
      PUSH_DATA (push, nvc0_format_table[fb->zsbuf->format].rt);
      PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
      PUSH_DATA (push, mt->layer_stride >> 2);
      BEGIN_NVC0(push, NVC0_3D(ZETA_ENABLE), 1);
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_3D(ZETA_HORIZ), 3);
      PUSH_DATA (push, sf->width);
      PUSH_DATA (push, sf->height);
      PUSH_DATA (push, (unk << 16) |
                 (sf->base.u.tex.first_layer + sf->depth));
      BEGIN_NVC0(push, NVC0_3D(ZETA_BASE_LAYER), 1);
      PUSH_DATA (push, sf->base.u.tex.first_layer);

      ms_mode = mt->ms_mode;

      if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      BCTX_REFN(nvc0->bufctx_3d, FB, &mt->base, WR);
   } else {
      BEGIN_NVC0(push, NVC0_3D(ZETA_ENABLE), 1);
      PUSH_DATA (push, 0);
   }

   IMMED_NVC0(push, NVC0_3D(MULTISAMPLE_MODE), ms_mode);

   /* Wait for outstanding reads of these surfaces before overwriting. */
   if (serialize)
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
}
164
165 static void
166 nvc0_validate_blend_colour(struct nvc0_context *nvc0)
167 {
168 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
169
170 BEGIN_NVC0(push, NVC0_3D(BLEND_COLOR(0)), 4);
171 PUSH_DATAf(push, nvc0->blend_colour.color[0]);
172 PUSH_DATAf(push, nvc0->blend_colour.color[1]);
173 PUSH_DATAf(push, nvc0->blend_colour.color[2]);
174 PUSH_DATAf(push, nvc0->blend_colour.color[3]);
175 }
176
177 static void
178 nvc0_validate_stencil_ref(struct nvc0_context *nvc0)
179 {
180 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
181 const ubyte *ref = &nvc0->stencil_ref.ref_value[0];
182
183 IMMED_NVC0(push, NVC0_3D(STENCIL_FRONT_FUNC_REF), ref[0]);
184 IMMED_NVC0(push, NVC0_3D(STENCIL_BACK_FUNC_REF), ref[1]);
185 }
186
187 static void
188 nvc0_validate_stipple(struct nvc0_context *nvc0)
189 {
190 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
191 unsigned i;
192
193 BEGIN_NVC0(push, NVC0_3D(POLYGON_STIPPLE_PATTERN(0)), 32);
194 for (i = 0; i < 32; ++i)
195 PUSH_DATA(push, util_bswap32(nvc0->stipple.stipple[i]));
196 }
197
198 static void
199 nvc0_validate_scissor(struct nvc0_context *nvc0)
200 {
201 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
202 struct pipe_scissor_state *s = &nvc0->scissor;
203
204 if (!(nvc0->dirty & NVC0_NEW_SCISSOR) &&
205 nvc0->rast->pipe.scissor == nvc0->state.scissor)
206 return;
207 nvc0->state.scissor = nvc0->rast->pipe.scissor;
208
209 BEGIN_NVC0(push, NVC0_3D(SCISSOR_HORIZ(0)), 2);
210 if (nvc0->rast->pipe.scissor) {
211 PUSH_DATA(push, (s->maxx << 16) | s->minx);
212 PUSH_DATA(push, (s->maxy << 16) | s->miny);
213 } else {
214 PUSH_DATA(push, (0xffff << 16) | 0);
215 PUSH_DATA(push, (0xffff << 16) | 0);
216 }
217 }
218
219 static void
220 nvc0_validate_viewport(struct nvc0_context *nvc0)
221 {
222 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
223 struct pipe_viewport_state *vp = &nvc0->viewport;
224 int x, y, w, h;
225 float zmin, zmax;
226
227 BEGIN_NVC0(push, NVC0_3D(VIEWPORT_TRANSLATE_X(0)), 3);
228 PUSH_DATAf(push, vp->translate[0]);
229 PUSH_DATAf(push, vp->translate[1]);
230 PUSH_DATAf(push, vp->translate[2]);
231 BEGIN_NVC0(push, NVC0_3D(VIEWPORT_SCALE_X(0)), 3);
232 PUSH_DATAf(push, vp->scale[0]);
233 PUSH_DATAf(push, vp->scale[1]);
234 PUSH_DATAf(push, vp->scale[2]);
235
236 /* now set the viewport rectangle to viewport dimensions for clipping */
237
238 x = util_iround(MAX2(0.0f, vp->translate[0] - fabsf(vp->scale[0])));
239 y = util_iround(MAX2(0.0f, vp->translate[1] - fabsf(vp->scale[1])));
240 w = util_iround(vp->translate[0] + fabsf(vp->scale[0])) - x;
241 h = util_iround(vp->translate[1] + fabsf(vp->scale[1])) - y;
242
243 zmin = vp->translate[2] - fabsf(vp->scale[2]);
244 zmax = vp->translate[2] + fabsf(vp->scale[2]);
245
246 BEGIN_NVC0(push, NVC0_3D(VIEWPORT_HORIZ(0)), 2);
247 PUSH_DATA (push, (w << 16) | x);
248 PUSH_DATA (push, (h << 16) | y);
249 BEGIN_NVC0(push, NVC0_3D(DEPTH_RANGE_NEAR(0)), 2);
250 PUSH_DATAf(push, zmin);
251 PUSH_DATAf(push, zmax);
252 }
253
/* Upload all user clip plane equations into shader stage @s's driver
 * constant buffer, located in the screen's uniform BO at
 * (5 << 16) + (s << 9), i.e. 512 bytes per stage; the planes live at
 * offset 256 within that area.
 */
static INLINE void
nvc0_upload_uclip_planes(struct nvc0_context *nvc0, unsigned s)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo = nvc0->screen->uniform_bo;

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, 512);
   PUSH_DATAh(push, bo->offset + (5 << 16) + (s << 9));
   PUSH_DATA (push, bo->offset + (5 << 16) + (s << 9));
   /* Inline-push all PIPE_MAX_CLIP_PLANES vec4 plane equations. */
   BEGIN_1IC0(push, NVC0_3D(CB_POS), PIPE_MAX_CLIP_PLANES * 4 + 1);
   PUSH_DATA (push, 256);
   PUSH_DATAp(push, &nvc0->clip.ucp[0][0], PIPE_MAX_CLIP_PLANES * 4);
}
268
269 static INLINE void
270 nvc0_check_program_ucps(struct nvc0_context *nvc0,
271 struct nvc0_program *vp, uint8_t mask)
272 {
273 const unsigned n = util_logbase2(mask) + 1;
274
275 if (vp->vp.num_ucps >= n)
276 return;
277 nvc0_program_destroy(nvc0, vp);
278
279 vp->vp.num_ucps = n;
280 if (likely(vp == nvc0->vertprog))
281 nvc0_vertprog_validate(nvc0);
282 else
283 if (likely(vp == nvc0->gmtyprog))
284 nvc0_vertprog_validate(nvc0);
285 else
286 nvc0_tevlprog_validate(nvc0);
287 }
288
/* Validate user clip plane state: pick the last enabled vertex pipeline
 * stage (geometry > tess eval > vertex), make sure its program exports
 * enough clip distances, upload plane equations when needed, and update
 * the hardware clip-distance enable mask and mode.
 */
static void
nvc0_validate_clip(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *vp;
   unsigned stage;
   uint8_t clip_enable = nvc0->rast->pipe.clip_plane_enable;

   /* The stage index doubles as the shift applied to NVC0_NEW_VERTPROG
    * below, and as the constbuf stage index for the plane upload. */
   if (nvc0->gmtyprog) {
      stage = 3;
      vp = nvc0->gmtyprog;
   } else
   if (nvc0->tevlprog) {
      stage = 2;
      vp = nvc0->tevlprog;
   } else {
      stage = 0;
      vp = nvc0->vertprog;
   }

   if (clip_enable && vp->vp.num_ucps < PIPE_MAX_CLIP_PLANES)
      nvc0_check_program_ucps(nvc0, vp, clip_enable);

   /* Re-upload the planes if the clip state or this stage's program
    * changed and the program actually consumes user clip planes. */
   if (nvc0->dirty & (NVC0_NEW_CLIP | (NVC0_NEW_VERTPROG << stage)))
      if (vp->vp.num_ucps > 0 && vp->vp.num_ucps <= PIPE_MAX_CLIP_PLANES)
         nvc0_upload_uclip_planes(nvc0, stage);

   clip_enable &= vp->vp.clip_enable;

   if (nvc0->state.clip_enable != clip_enable) {
      nvc0->state.clip_enable = clip_enable;
      IMMED_NVC0(push, NVC0_3D(CLIP_DISTANCE_ENABLE), clip_enable);
   }
   if (nvc0->state.clip_mode != vp->vp.clip_mode) {
      nvc0->state.clip_mode = vp->vp.clip_mode;
      BEGIN_NVC0(push, NVC0_3D(CLIP_DISTANCE_MODE), 1);
      PUSH_DATA (push, vp->vp.clip_mode);
   }
}
328
329 static void
330 nvc0_validate_blend(struct nvc0_context *nvc0)
331 {
332 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
333
334 PUSH_SPACE(push, nvc0->blend->size);
335 PUSH_DATAp(push, nvc0->blend->state, nvc0->blend->size);
336 }
337
338 static void
339 nvc0_validate_zsa(struct nvc0_context *nvc0)
340 {
341 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
342
343 PUSH_SPACE(push, nvc0->zsa->size);
344 PUSH_DATAp(push, nvc0->zsa->state, nvc0->zsa->size);
345 }
346
347 static void
348 nvc0_validate_rasterizer(struct nvc0_context *nvc0)
349 {
350 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
351
352 PUSH_SPACE(push, nvc0->rast->size);
353 PUSH_DATAp(push, nvc0->rast->state, nvc0->rast->size);
354 }
355
/* Upload or bind all dirty constant buffers for the 5 graphics shader
 * stages.  User (OpenGL uniform) data is pushed into the screen's shared
 * uniform BO; real buffer resources are bound directly.
 */
static void
nvc0_constbufs_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned s;

   for (s = 0; s < 5; ++s) {
      /* Process dirty slots lowest-first until none remain. */
      while (nvc0->constbuf_dirty[s]) {
         int i = ffs(nvc0->constbuf_dirty[s]) - 1;
         nvc0->constbuf_dirty[s] &= ~(1 << i);

         if (nvc0->constbuf[s][i].user) {
            /* User uniform data lives in the per-stage 64 KiB area of
             * uniform_bo at base = s << 16. */
            struct nouveau_bo *bo = nvc0->screen->uniform_bo;
            const unsigned base = s << 16;
            const unsigned size = nvc0->constbuf[s][0].size;
            assert(i == 0); /* we really only want OpenGL uniforms here */
            assert(nvc0->constbuf[s][0].u.data);

            /* Rebind only if the currently bound window is too small;
             * bound size is rounded up to 256 bytes. */
            if (nvc0->state.uniform_buffer_bound[s] < size) {
               nvc0->state.uniform_buffer_bound[s] = align(size, 0x100);

               BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
               PUSH_DATA (push, nvc0->state.uniform_buffer_bound[s]);
               PUSH_DATAh(push, bo->offset + base);
               PUSH_DATA (push, bo->offset + base);
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (0 << 4) | 1);
            }
            /* Copy the uniform data (size rounded up to whole dwords). */
            nvc0_cb_push(&nvc0->base, bo, NOUVEAU_BO_VRAM,
                         base, nvc0->state.uniform_buffer_bound[s],
                         0, (size + 3) / 4,
                         nvc0->constbuf[s][0].u.data);
         } else {
            struct nv04_resource *res =
               nv04_resource(nvc0->constbuf[s][i].u.buf);
            if (res) {
               /* Bind the buffer resource as constant buffer slot i. */
               BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
               PUSH_DATA (push, nvc0->constbuf[s][i].size);
               PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (i << 4) | 1);

               BCTX_REFN(nvc0->bufctx_3d, CB(s, i), res, RD);
            } else {
               /* NULL buffer: disable this binding slot. */
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (i << 4) | 0);
            }
            /* Slot 0 no longer holds user uniforms; force a rebind next
             * time user data is set. */
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
         }
      }
   }
}
410
411 static void
412 nvc0_validate_sample_mask(struct nvc0_context *nvc0)
413 {
414 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
415
416 unsigned mask[4] =
417 {
418 nvc0->sample_mask & 0xffff,
419 nvc0->sample_mask & 0xffff,
420 nvc0->sample_mask & 0xffff,
421 nvc0->sample_mask & 0xffff
422 };
423
424 BEGIN_NVC0(push, NVC0_3D(MSAA_MASK(0)), 4);
425 PUSH_DATA (push, mask[0]);
426 PUSH_DATA (push, mask[1]);
427 PUSH_DATA (push, mask[2]);
428 PUSH_DATA (push, mask[3]);
429 BEGIN_NVC0(push, NVC0_3D(SAMPLE_SHADING), 1);
430 PUSH_DATA (push, 0x01);
431 }
432
433 void
434 nvc0_validate_global_residents(struct nvc0_context *nvc0,
435 struct nouveau_bufctx *bctx, int bin)
436 {
437 unsigned i;
438
439 for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
440 ++i) {
441 struct pipe_resource *res = *util_dynarray_element(
442 &nvc0->global_residents, struct pipe_resource *, i);
443 if (res)
444 nvc0_add_resident(bctx, bin, nv04_resource(res), NOUVEAU_BO_RDWR);
445 }
446 }
447
/* Derived state: RASTERIZE_ENABLE.
 * Rasterization is disabled either when explicitly requested via
 * rasterizer_discard, or when the pipeline can produce no output (no
 * depth/stencil test enabled and no usable fragment program).
 *
 * The NULL checks on nvc0->rast and nvc0->fragprog avoid a crash when
 * this runs before those state objects are bound (see commit:
 * "nvc0: avoid crash on updating RASTERIZE_ENABLE state").
 */
static void
nvc0_validate_derived_1(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   boolean rasterizer_discard;

   if (nvc0->rast && nvc0->rast->pipe.rasterizer_discard) {
      rasterizer_discard = TRUE;
   } else {
      boolean zs = nvc0->zsa &&
         (nvc0->zsa->pipe.depth.enabled || nvc0->zsa->pipe.stencil[0].enabled);
      /* hdr[18]: presumably the FP output mask -- non-zero means the
       * fragment program writes something.  TODO confirm. */
      rasterizer_discard = !zs &&
         (!nvc0->fragprog || !nvc0->fragprog->hdr[18]);
   }

   /* Only touch the hardware register on an actual state change. */
   if (rasterizer_discard != nvc0->state.rasterizer_discard) {
      nvc0->state.rasterizer_discard = rasterizer_discard;
      IMMED_NVC0(push, NVC0_3D(RASTERIZE_ENABLE), !rasterizer_discard);
   }
}
468
/* Called when a different context takes over the hardware channel:
 * inherit the channel's current state record from the previous context,
 * mark all state dirty, then clear dirty bits for state objects that are
 * not bound in the new context (presumably because their validate
 * functions would dereference NULL).
 */
static void
nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
{
   struct nvc0_context *ctx_from = ctx_to->screen->cur_ctx;
   unsigned s;

   if (ctx_from)
      ctx_to->state = ctx_from->state;

   ctx_to->dirty = ~0;

   for (s = 0; s < 5; ++s) {
      ctx_to->samplers_dirty[s] = ~0;
      ctx_to->textures_dirty[s] = ~0;
   }

   if (!ctx_to->vertex)
      ctx_to->dirty &= ~(NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS);
   if (!ctx_to->idxbuf.buffer)
      ctx_to->dirty &= ~NVC0_NEW_IDXBUF;

   if (!ctx_to->vertprog)
      ctx_to->dirty &= ~NVC0_NEW_VERTPROG;
   if (!ctx_to->fragprog)
      ctx_to->dirty &= ~NVC0_NEW_FRAGPROG;

   if (!ctx_to->blend)
      ctx_to->dirty &= ~NVC0_NEW_BLEND;
   if (!ctx_to->rast)
      ctx_to->dirty &= ~(NVC0_NEW_RASTERIZER | NVC0_NEW_SCISSOR);
   if (!ctx_to->zsa)
      ctx_to->dirty &= ~NVC0_NEW_ZSA;

   ctx_to->screen->cur_ctx = ctx_to;
}
504
/* Table of state validation functions paired with the dirty-state bits
 * that trigger them.  Entries run in array order, so derived state
 * (e.g. nvc0_validate_derived_1, nvc0_validate_clip) is listed after the
 * shader-stage validators it depends on.
 */
static struct state_validate {
    void (*func)(struct nvc0_context *);
    uint32_t states;
} validate_list[] = {
    { nvc0_validate_fb,            NVC0_NEW_FRAMEBUFFER },
    { nvc0_validate_blend,         NVC0_NEW_BLEND },
    { nvc0_validate_zsa,           NVC0_NEW_ZSA },
    { nvc0_validate_sample_mask,   NVC0_NEW_SAMPLE_MASK },
    { nvc0_validate_rasterizer,    NVC0_NEW_RASTERIZER },
    { nvc0_validate_blend_colour,  NVC0_NEW_BLEND_COLOUR },
    { nvc0_validate_stencil_ref,   NVC0_NEW_STENCIL_REF },
    { nvc0_validate_stipple,       NVC0_NEW_STIPPLE },
    { nvc0_validate_scissor,       NVC0_NEW_SCISSOR | NVC0_NEW_RASTERIZER },
    { nvc0_validate_viewport,      NVC0_NEW_VIEWPORT },
    { nvc0_vertprog_validate,      NVC0_NEW_VERTPROG },
    { nvc0_tctlprog_validate,      NVC0_NEW_TCTLPROG },
    { nvc0_tevlprog_validate,      NVC0_NEW_TEVLPROG },
    { nvc0_gmtyprog_validate,      NVC0_NEW_GMTYPROG },
    { nvc0_fragprog_validate,      NVC0_NEW_FRAGPROG },
    { nvc0_validate_derived_1,     NVC0_NEW_FRAGPROG | NVC0_NEW_ZSA |
                                   NVC0_NEW_RASTERIZER },
    { nvc0_validate_clip,          NVC0_NEW_CLIP | NVC0_NEW_RASTERIZER |
                                   NVC0_NEW_VERTPROG |
                                   NVC0_NEW_TEVLPROG |
                                   NVC0_NEW_GMTYPROG },
    { nvc0_constbufs_validate,     NVC0_NEW_CONSTBUF },
    { nvc0_validate_textures,      NVC0_NEW_TEXTURES },
    { nvc0_validate_samplers,      NVC0_NEW_SAMPLERS },
    { nve4_set_tex_handles,        NVC0_NEW_TEXTURES | NVC0_NEW_SAMPLERS },
    { nvc0_vertex_arrays_validate, NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS },
    { nvc0_validate_surfaces,      NVC0_NEW_SURFACES },
    { nvc0_idxbuf_validate,        NVC0_NEW_IDXBUF },
    { nvc0_tfb_validate,           NVC0_NEW_TFB_TARGETS | NVC0_NEW_GMTYPROG }
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
540
/* Run all validation functions whose trigger bits intersect the dirty
 * state selected by @mask, attach the 3D buffer context and validate the
 * push buffer.
 *
 * @mask:  subset of NVC0_NEW_* dirty bits to consider.
 * @words: unused in this body -- presumably a reserved push-space hint;
 *         verify against callers.
 * Returns FALSE if push buffer validation failed, TRUE otherwise.
 */
boolean
nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words)
{
   uint32_t state_mask;
   int ret;
   unsigned i;

   /* Take over the channel if another context used the hardware last. */
   if (nvc0->screen->cur_ctx != nvc0)
      nvc0_switch_pipe_context(nvc0);

   state_mask = nvc0->dirty & mask;

   if (state_mask) {
      for (i = 0; i < validate_list_len; ++i) {
         struct state_validate *validate = &validate_list[i];

         if (state_mask & validate->states)
            validate->func(nvc0);
      }
      nvc0->dirty &= ~state_mask;

      nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, FALSE);
   }

   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_3d);
   ret = nouveau_pushbuf_validate(nvc0->base.pushbuf);
   if (unlikely(ret))
      return FALSE;

   /* If validation flushed the push buffer, the fences emitted above are
    * gone -- re-emit them. */
   if (unlikely(nvc0->state.flushed))
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, TRUE);

   return TRUE;
}