/* nvc0_state_validate.c — 3D state validation for the nvc0 (Fermi) gallium
 * driver.  Path: src/gallium/drivers/nvc0/nvc0_state_validate.c
 */
1
2 #include "util/u_math.h"
3
4 #include "nvc0_context.h"
5
#if 0
/* Experimental zcull (depth-culling region) setup — currently disabled.
 * Places a zcull region directly after the depth miptree inside its BO
 * (128 KiB aligned) and programs region address, limit, size and window
 * dimensions.  NOTE(review): SUBC_3D(0x07e0)/(0x15c8) are undocumented
 * methods; meanings inferred from the surrounding ZCULL methods — confirm.
 */
static void
nvc0_validate_zcull(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   struct nv50_surface *sf = nv50_surface(fb->zsbuf);
   struct nv50_miptree *mt = nv50_miptree(sf->base.texture);
   struct nouveau_bo *bo = mt->base.bo;
   uint32_t size;
   uint32_t offset = align(mt->total_size, 1 << 17); /* 128 KiB aligned */
   unsigned width, height;

   /* Only plain 2D depth surfaces are handled (no 3D or array textures). */
   assert(mt->base.base.depth0 == 1 && mt->base.base.array_size < 2);

   size = mt->total_size * 2;

   /* Height aligned to 32; width rounded up to a multiple of 224. */
   height = align(fb->height, 32);
   width = fb->width % 224;
   if (width)
      width = fb->width + (224 - width);
   else
      width = fb->width;

   BEGIN_NVC0(push, NVC0_3D(ZCULL_REGION), 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, bo->offset + offset);
   PUSH_DATA (push, bo->offset + offset);
   offset += 1 << 17; /* limit is one 128 KiB slice past the base */
   BEGIN_NVC0(push, NVC0_3D(ZCULL_LIMIT_HIGH), 2);
   PUSH_DATAh(push, bo->offset + offset);
   PUSH_DATA (push, bo->offset + offset);
   BEGIN_NVC0(push, SUBC_3D(0x07e0), 2);
   PUSH_DATA (push, size);
   PUSH_DATA (push, size >> 16);
   BEGIN_NVC0(push, SUBC_3D(0x15c8), 1); /* bits 0x3 */
   PUSH_DATA (push, 2);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_WIDTH), 4);
   PUSH_DATA (push, width);
   PUSH_DATA (push, height);
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_WINDOW_OFFSET_X), 2);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_3D(ZCULL_INVALIDATE), 1);
   PUSH_DATA (push, 0);
}
#endif
56
/* Validate the framebuffer: bind every colour render target and the
 * depth/stencil surface, program the screen scissor and the multisample
 * mode, and register the surfaces with the 3D bufctx for fencing.
 * If any bound resource was previously marked as being read by the GPU,
 * a SERIALIZE is emitted to order the upcoming writes after those reads.
 */
static void
nvc0_validate_fb(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
   unsigned i;
   unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1;
   boolean serialize = FALSE;

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);

   /* 076543210 is octal: eight 3-bit fields, presumably an identity mapping
    * of RT indices 7..0; low 4 bits carry the colour buffer count. */
   BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
   PUSH_DATA (push, (076543210 << 4) | fb->nr_cbufs);
   BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
   PUSH_DATA (push, fb->width << 16);
   PUSH_DATA (push, fb->height << 16);

   for (i = 0; i < fb->nr_cbufs; ++i) {
      struct nv50_surface *sf = nv50_surface(fb->cbufs[i]);
      struct nv04_resource *res = nv04_resource(sf->base.texture);
      struct nouveau_bo *bo = res->bo;

      /* 9 data words: address (2), size (2), format, tiling, depth/layers,
       * layer stride, base layer. */
      BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(i)), 9);
      PUSH_DATAh(push, res->address + sf->offset);
      PUSH_DATA (push, res->address + sf->offset);
      if (likely(nouveau_bo_memtype(bo))) {
         /* Tiled miptree surface. */
         struct nv50_miptree *mt = nv50_miptree(sf->base.texture);

         assert(sf->base.texture->target != PIPE_BUFFER);

         PUSH_DATA(push, sf->width);
         PUSH_DATA(push, sf->height);
         PUSH_DATA(push, nvc0_format_table[sf->base.format].rt);
         PUSH_DATA(push, (mt->layout_3d << 16) |
                   mt->level[sf->base.u.tex.level].tile_mode);
         PUSH_DATA(push, sf->base.u.tex.first_layer + sf->depth);
         PUSH_DATA(push, mt->layer_stride >> 2);
         PUSH_DATA(push, sf->base.u.tex.first_layer);

         ms_mode = mt->ms_mode;
      } else {
         /* Linear surface or buffer resource. */
         if (res->base.target == PIPE_BUFFER) {
            /* NOTE(review): 262144 x 1 looks like a fixed pseudo-surface
             * size for buffer rendering — confirm against hw docs. */
            PUSH_DATA(push, 262144);
            PUSH_DATA(push, 1);
         } else {
            PUSH_DATA(push, nv50_miptree(sf->base.texture)->level[0].pitch);
            PUSH_DATA(push, sf->height);
         }
         PUSH_DATA(push, nvc0_format_table[sf->base.format].rt);
         /* 1 << 12: presumably flags the RT as pitch-linear — confirm. */
         PUSH_DATA(push, 1 << 12);
         PUSH_DATA(push, 1);
         PUSH_DATA(push, 0);
         PUSH_DATA(push, 0);

         nvc0_resource_fence(res, NOUVEAU_BO_WR);

         assert(!fb->zsbuf);
      }

      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      /* only register for writing, otherwise we'd always serialize here */
      BCTX_REFN(nvc0->bufctx_3d, FB, res, WR);
   }

   if (fb->zsbuf) {
      struct nv50_miptree *mt = nv50_miptree(fb->zsbuf->texture);
      struct nv50_surface *sf = nv50_surface(fb->zsbuf);
      int unk = mt->base.base.target == PIPE_TEXTURE_2D;

      BEGIN_NVC0(push, NVC0_3D(ZETA_ADDRESS_HIGH), 5);
      PUSH_DATAh(push, mt->base.address + sf->offset);
      PUSH_DATA (push, mt->base.address + sf->offset);
      PUSH_DATA (push, nvc0_format_table[fb->zsbuf->format].rt);
      PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
      PUSH_DATA (push, mt->layer_stride >> 2);
      BEGIN_NVC0(push, NVC0_3D(ZETA_ENABLE), 1);
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_3D(ZETA_HORIZ), 3);
      PUSH_DATA (push, sf->width);
      PUSH_DATA (push, sf->height);
      PUSH_DATA (push, (unk << 16) |
                 (sf->base.u.tex.first_layer + sf->depth));
      BEGIN_NVC0(push, NVC0_3D(ZETA_BASE_LAYER), 1);
      PUSH_DATA (push, sf->base.u.tex.first_layer);

      ms_mode = mt->ms_mode;

      if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
         serialize = TRUE;
      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;

      BCTX_REFN(nvc0->bufctx_3d, FB, &mt->base, WR);
   } else {
      BEGIN_NVC0(push, NVC0_3D(ZETA_ENABLE), 1);
      PUSH_DATA (push, 0);
   }

   IMMED_NVC0(push, NVC0_3D(MULTISAMPLE_MODE), ms_mode);

   if (serialize)
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);

   NOUVEAU_DRV_STAT(&nvc0->screen->base, gpu_serialize_count, serialize);
}
166
167 static void
168 nvc0_validate_blend_colour(struct nvc0_context *nvc0)
169 {
170 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
171
172 BEGIN_NVC0(push, NVC0_3D(BLEND_COLOR(0)), 4);
173 PUSH_DATAf(push, nvc0->blend_colour.color[0]);
174 PUSH_DATAf(push, nvc0->blend_colour.color[1]);
175 PUSH_DATAf(push, nvc0->blend_colour.color[2]);
176 PUSH_DATAf(push, nvc0->blend_colour.color[3]);
177 }
178
179 static void
180 nvc0_validate_stencil_ref(struct nvc0_context *nvc0)
181 {
182 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
183 const ubyte *ref = &nvc0->stencil_ref.ref_value[0];
184
185 IMMED_NVC0(push, NVC0_3D(STENCIL_FRONT_FUNC_REF), ref[0]);
186 IMMED_NVC0(push, NVC0_3D(STENCIL_BACK_FUNC_REF), ref[1]);
187 }
188
189 static void
190 nvc0_validate_stipple(struct nvc0_context *nvc0)
191 {
192 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
193 unsigned i;
194
195 BEGIN_NVC0(push, NVC0_3D(POLYGON_STIPPLE_PATTERN(0)), 32);
196 for (i = 0; i < 32; ++i)
197 PUSH_DATA(push, util_bswap32(nvc0->stipple.stipple[i]));
198 }
199
/* Emit the scissor rectangle.  Runs for NVC0_NEW_SCISSOR or when the
 * rasterizer's scissor-enable flag changed; skipped otherwise.  With
 * scissoring disabled in the rasterizer, a maximal 0..0xffff rectangle
 * is programmed instead of the user rectangle.
 */
static void
nvc0_validate_scissor(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_scissor_state *s = &nvc0->scissor;

   if (!(nvc0->dirty & NVC0_NEW_SCISSOR) &&
       nvc0->rast->pipe.scissor == nvc0->state.scissor)
      return;
   /* Remember the enable flag so the check above works next time. */
   nvc0->state.scissor = nvc0->rast->pipe.scissor;

   BEGIN_NVC0(push, NVC0_3D(SCISSOR_HORIZ(0)), 2);
   if (nvc0->rast->pipe.scissor) {
      PUSH_DATA(push, (s->maxx << 16) | s->minx);
      PUSH_DATA(push, (s->maxy << 16) | s->miny);
   } else {
      PUSH_DATA(push, (0xffff << 16) | 0);
      PUSH_DATA(push, (0xffff << 16) | 0);
   }
}
220
/* Emit the viewport transform (translate/scale), then derive and emit the
 * integer viewport rectangle and the depth range used for clipping.
 */
static void
nvc0_validate_viewport(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_viewport_state *vp = &nvc0->viewport;
   int x, y, w, h;
   float zmin, zmax;

   BEGIN_NVC0(push, NVC0_3D(VIEWPORT_TRANSLATE_X(0)), 3);
   PUSH_DATAf(push, vp->translate[0]);
   PUSH_DATAf(push, vp->translate[1]);
   PUSH_DATAf(push, vp->translate[2]);
   BEGIN_NVC0(push, NVC0_3D(VIEWPORT_SCALE_X(0)), 3);
   PUSH_DATAf(push, vp->scale[0]);
   PUSH_DATAf(push, vp->scale[1]);
   PUSH_DATAf(push, vp->scale[2]);

   /* now set the viewport rectangle to viewport dimensions for clipping */

   /* Rectangle spans translate ± |scale| per axis, clamped to >= 0. */
   x = util_iround(MAX2(0.0f, vp->translate[0] - fabsf(vp->scale[0])));
   y = util_iround(MAX2(0.0f, vp->translate[1] - fabsf(vp->scale[1])));
   w = util_iround(vp->translate[0] + fabsf(vp->scale[0])) - x;
   h = util_iround(vp->translate[1] + fabsf(vp->scale[1])) - y;

   /* Depth range likewise: translate[2] ± |scale[2]|. */
   zmin = vp->translate[2] - fabsf(vp->scale[2]);
   zmax = vp->translate[2] + fabsf(vp->scale[2]);

   /* Cache the packed rect; other code re-emits it from vport_int[]. */
   nvc0->vport_int[0] = (w << 16) | x;
   nvc0->vport_int[1] = (h << 16) | y;
   BEGIN_NVC0(push, NVC0_3D(VIEWPORT_HORIZ(0)), 2);
   PUSH_DATA (push, nvc0->vport_int[0]);
   PUSH_DATA (push, nvc0->vport_int[1]);
   BEGIN_NVC0(push, NVC0_3D(DEPTH_RANGE_NEAR(0)), 2);
   PUSH_DATAf(push, zmin);
   PUSH_DATAf(push, zmax);
}
257
/* Upload the user clip planes into the constant buffer slice that stage s
 * reads them from.  The address (5 << 16) + (s << 9) selects a per-stage
 * region inside the screen's uniform BO, with the planes at offset 256
 * (presumably a driver-reserved layout — confirm against nvc0_screen).
 */
static INLINE void
nvc0_upload_uclip_planes(struct nvc0_context *nvc0, unsigned s)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo = nvc0->screen->uniform_bo;

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, 512);
   PUSH_DATAh(push, bo->offset + (5 << 16) + (s << 9));
   PUSH_DATA (push, bo->offset + (5 << 16) + (s << 9));
   /* Inline upload: CB_POS followed by all plane coefficients. */
   BEGIN_1IC0(push, NVC0_3D(CB_POS), PIPE_MAX_CLIP_PLANES * 4 + 1);
   PUSH_DATA (push, 256);
   PUSH_DATAp(push, &nvc0->clip.ucp[0][0], PIPE_MAX_CLIP_PLANES * 4);
}
272
273 static INLINE void
274 nvc0_check_program_ucps(struct nvc0_context *nvc0,
275 struct nvc0_program *vp, uint8_t mask)
276 {
277 const unsigned n = util_logbase2(mask) + 1;
278
279 if (vp->vp.num_ucps >= n)
280 return;
281 nvc0_program_destroy(nvc0, vp);
282
283 vp->vp.num_ucps = n;
284 if (likely(vp == nvc0->vertprog))
285 nvc0_vertprog_validate(nvc0);
286 else
287 if (likely(vp == nvc0->gmtyprog))
288 nvc0_vertprog_validate(nvc0);
289 else
290 nvc0_tevlprog_validate(nvc0);
291 }
292
/* Validate clipping state: pick the last enabled vertex-processing stage
 * (geometry > tess-eval > vertex), make sure its program exports enough
 * clip distances for the enabled user planes, upload the plane values if
 * needed, and emit clip enable mask and clip mode when they changed.
 */
static void
nvc0_validate_clip(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *vp;
   unsigned stage;
   uint8_t clip_enable = nvc0->rast->pipe.clip_plane_enable;

   if (nvc0->gmtyprog) {
      stage = 3;
      vp = nvc0->gmtyprog;
   } else
   if (nvc0->tevlprog) {
      stage = 2;
      vp = nvc0->tevlprog;
   } else {
      stage = 0;
      vp = nvc0->vertprog;
   }

   if (clip_enable && vp->vp.num_ucps < PIPE_MAX_CLIP_PLANES)
      nvc0_check_program_ucps(nvc0, vp, clip_enable);

   /* Re-upload planes when either the clip state or this stage's program
    * changed (stage offsets NVC0_NEW_VERTPROG to the right program bit). */
   if (nvc0->dirty & (NVC0_NEW_CLIP | (NVC0_NEW_VERTPROG << stage)))
      if (vp->vp.num_ucps > 0 && vp->vp.num_ucps <= PIPE_MAX_CLIP_PLANES)
         nvc0_upload_uclip_planes(nvc0, stage);

   /* Only planes the program actually supports can be enabled. */
   clip_enable &= vp->vp.clip_enable;

   if (nvc0->state.clip_enable != clip_enable) {
      nvc0->state.clip_enable = clip_enable;
      IMMED_NVC0(push, NVC0_3D(CLIP_DISTANCE_ENABLE), clip_enable);
   }
   if (nvc0->state.clip_mode != vp->vp.clip_mode) {
      nvc0->state.clip_mode = vp->vp.clip_mode;
      BEGIN_NVC0(push, NVC0_3D(CLIP_DISTANCE_MODE), 1);
      PUSH_DATA (push, vp->vp.clip_mode);
   }
}
332
/* Emit the pre-built blend state object (raw method words recorded at
 * state-creation time) into the pushbuffer. */
static void
nvc0_validate_blend(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;

   PUSH_SPACE(push, nvc0->blend->size);
   PUSH_DATAp(push, nvc0->blend->state, nvc0->blend->size);
}
341
/* Emit the pre-built depth/stencil/alpha state object into the
 * pushbuffer. */
static void
nvc0_validate_zsa(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;

   PUSH_SPACE(push, nvc0->zsa->size);
   PUSH_DATAp(push, nvc0->zsa->state, nvc0->zsa->size);
}
350
/* Emit the pre-built rasterizer state object into the pushbuffer. */
static void
nvc0_validate_rasterizer(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;

   PUSH_SPACE(push, nvc0->rast->size);
   PUSH_DATAp(push, nvc0->rast->state, nvc0->rast->size);
}
359
/* Rebind all dirty constant buffers for every shader stage (0..4).
 * Slot 0 with the 'user' flag holds OpenGL-style user uniforms, which are
 * pushed inline into the screen's uniform BO; other slots bind a resource
 * directly via CB_SIZE/CB_ADDRESS + CB_BIND.
 */
static void
nvc0_constbufs_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned s;

   for (s = 0; s < 5; ++s) {
      /* Process and clear one dirty slot bit per iteration. */
      while (nvc0->constbuf_dirty[s]) {
         int i = ffs(nvc0->constbuf_dirty[s]) - 1;
         nvc0->constbuf_dirty[s] &= ~(1 << i);

         if (nvc0->constbuf[s][i].user) {
            struct nouveau_bo *bo = nvc0->screen->uniform_bo;
            const unsigned base = s << 16; /* per-stage region in the BO */
            const unsigned size = nvc0->constbuf[s][0].size;
            assert(i == 0); /* we really only want OpenGL uniforms here */
            assert(nvc0->constbuf[s][0].u.data);

            /* Grow the bound CB only when the data no longer fits. */
            if (nvc0->state.uniform_buffer_bound[s] < size) {
               nvc0->state.uniform_buffer_bound[s] = align(size, 0x100);

               BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
               PUSH_DATA (push, nvc0->state.uniform_buffer_bound[s]);
               PUSH_DATAh(push, bo->offset + base);
               PUSH_DATA (push, bo->offset + base);
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (0 << 4) | 1);
            }
            /* Push the uniform data itself (size is in bytes, data in
             * 32-bit words). */
            nvc0_cb_push(&nvc0->base, bo, NOUVEAU_BO_VRAM,
                         base, nvc0->state.uniform_buffer_bound[s],
                         0, (size + 3) / 4,
                         nvc0->constbuf[s][0].u.data);
         } else {
            struct nv04_resource *res =
               nv04_resource(nvc0->constbuf[s][i].u.buf);
            if (res) {
               BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
               PUSH_DATA (push, nvc0->constbuf[s][i].size);
               PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (i << 4) | 1); /* bind slot i, enabled */

               BCTX_REFN(nvc0->bufctx_3d, CB(s, i), res, RD);
            } else {
               /* No buffer: disable the binding for slot i. */
               BEGIN_NVC0(push, NVC0_3D(CB_BIND(s)), 1);
               PUSH_DATA (push, (i << 4) | 0);
            }
            /* A real buffer in slot 0 invalidates the cached user-uniform
             * binding size. */
            if (i == 0)
               nvc0->state.uniform_buffer_bound[s] = 0;
         }
      }
   }
}
414
415 static void
416 nvc0_validate_sample_mask(struct nvc0_context *nvc0)
417 {
418 struct nouveau_pushbuf *push = nvc0->base.pushbuf;
419
420 unsigned mask[4] =
421 {
422 nvc0->sample_mask & 0xffff,
423 nvc0->sample_mask & 0xffff,
424 nvc0->sample_mask & 0xffff,
425 nvc0->sample_mask & 0xffff
426 };
427
428 BEGIN_NVC0(push, NVC0_3D(MSAA_MASK(0)), 4);
429 PUSH_DATA (push, mask[0]);
430 PUSH_DATA (push, mask[1]);
431 PUSH_DATA (push, mask[2]);
432 PUSH_DATA (push, mask[3]);
433 BEGIN_NVC0(push, NVC0_3D(SAMPLE_SHADING), 1);
434 PUSH_DATA (push, 0x01);
435 }
436
437 void
438 nvc0_validate_global_residents(struct nvc0_context *nvc0,
439 struct nouveau_bufctx *bctx, int bin)
440 {
441 unsigned i;
442
443 for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
444 ++i) {
445 struct pipe_resource *res = *util_dynarray_element(
446 &nvc0->global_residents, struct pipe_resource *, i);
447 if (res)
448 nvc0_add_resident(bctx, bin, nv04_resource(res), NOUVEAU_BO_RDWR);
449 }
450 }
451
/* Derive the effective rasterizer-discard setting: either requested
 * explicitly by the rasterizer state, or implied when there is no
 * depth/stencil test and the fragment program has nothing to contribute
 * (hdr[18] == 0 — presumably "FP writes outputs"; confirm).  Emits
 * RASTERIZE_ENABLE only on change.
 */
static void
nvc0_validate_derived_1(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   boolean rasterizer_discard;

   if (nvc0->rast && nvc0->rast->pipe.rasterizer_discard) {
      rasterizer_discard = TRUE;
   } else {
      boolean zs = nvc0->zsa &&
         (nvc0->zsa->pipe.depth.enabled || nvc0->zsa->pipe.stencil[0].enabled);
      rasterizer_discard = !zs &&
         (!nvc0->fragprog || !nvc0->fragprog->hdr[18]);
   }

   if (rasterizer_discard != nvc0->state.rasterizer_discard) {
      nvc0->state.rasterizer_discard = rasterizer_discard;
      IMMED_NVC0(push, NVC0_3D(RASTERIZE_ENABLE), !rasterizer_discard);
   }
}
472
/* Make ctx_to the screen's current context: inherit the hardware state
 * shadow from the previous context, mark everything dirty, then clear
 * dirty bits for state categories this context has not set (so validation
 * does not dereference NULL state objects).
 */
static void
nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
{
   struct nvc0_context *ctx_from = ctx_to->screen->cur_ctx;
   unsigned s;

   /* The hw channel still holds the old context's state; copy its shadow. */
   if (ctx_from)
      ctx_to->state = ctx_from->state;

   ctx_to->dirty = ~0;

   for (s = 0; s < 5; ++s) {
      ctx_to->samplers_dirty[s] = ~0;
      ctx_to->textures_dirty[s] = ~0;
   }

   /* Don't try to validate state objects that were never bound here. */
   if (!ctx_to->vertex)
      ctx_to->dirty &= ~(NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS);
   if (!ctx_to->idxbuf.buffer)
      ctx_to->dirty &= ~NVC0_NEW_IDXBUF;

   if (!ctx_to->vertprog)
      ctx_to->dirty &= ~NVC0_NEW_VERTPROG;
   if (!ctx_to->fragprog)
      ctx_to->dirty &= ~NVC0_NEW_FRAGPROG;

   if (!ctx_to->blend)
      ctx_to->dirty &= ~NVC0_NEW_BLEND;
   if (!ctx_to->rast)
      ctx_to->dirty &= ~(NVC0_NEW_RASTERIZER | NVC0_NEW_SCISSOR);
   if (!ctx_to->zsa)
      ctx_to->dirty &= ~NVC0_NEW_ZSA;

   ctx_to->screen->cur_ctx = ctx_to;
}
508
/* Table of validation functions, each paired with the dirty-state bits
 * that trigger it.  nvc0_state_validate() walks the table in order, so
 * entries with dependencies (e.g. derived state after the programs) must
 * stay sorted accordingly.
 */
static struct state_validate {
    void (*func)(struct nvc0_context *);
    uint32_t states;
} validate_list[] = {
    { nvc0_validate_fb,            NVC0_NEW_FRAMEBUFFER },
    { nvc0_validate_blend,         NVC0_NEW_BLEND },
    { nvc0_validate_zsa,           NVC0_NEW_ZSA },
    { nvc0_validate_sample_mask,   NVC0_NEW_SAMPLE_MASK },
    { nvc0_validate_rasterizer,    NVC0_NEW_RASTERIZER },
    { nvc0_validate_blend_colour,  NVC0_NEW_BLEND_COLOUR },
    { nvc0_validate_stencil_ref,   NVC0_NEW_STENCIL_REF },
    { nvc0_validate_stipple,       NVC0_NEW_STIPPLE },
    { nvc0_validate_scissor,       NVC0_NEW_SCISSOR | NVC0_NEW_RASTERIZER },
    { nvc0_validate_viewport,      NVC0_NEW_VIEWPORT },
    { nvc0_vertprog_validate,      NVC0_NEW_VERTPROG },
    { nvc0_tctlprog_validate,      NVC0_NEW_TCTLPROG },
    { nvc0_tevlprog_validate,      NVC0_NEW_TEVLPROG },
    { nvc0_gmtyprog_validate,      NVC0_NEW_GMTYPROG },
    { nvc0_fragprog_validate,      NVC0_NEW_FRAGPROG },
    { nvc0_validate_derived_1,     NVC0_NEW_FRAGPROG | NVC0_NEW_ZSA |
                                   NVC0_NEW_RASTERIZER },
    { nvc0_validate_clip,          NVC0_NEW_CLIP | NVC0_NEW_RASTERIZER |
                                   NVC0_NEW_VERTPROG |
                                   NVC0_NEW_TEVLPROG |
                                   NVC0_NEW_GMTYPROG },
    { nvc0_constbufs_validate,     NVC0_NEW_CONSTBUF },
    { nvc0_validate_textures,      NVC0_NEW_TEXTURES },
    { nvc0_validate_samplers,      NVC0_NEW_SAMPLERS },
    { nve4_set_tex_handles,        NVC0_NEW_TEXTURES | NVC0_NEW_SAMPLERS },
    { nvc0_vertex_arrays_validate, NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS },
    { nvc0_validate_surfaces,      NVC0_NEW_SURFACES },
    { nvc0_idxbuf_validate,        NVC0_NEW_IDXBUF },
    { nvc0_tfb_validate,           NVC0_NEW_TFB_TARGETS | NVC0_NEW_GMTYPROG }
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
544
/* Validate all dirty state covered by 'mask', then validate the push
 * buffer with the 3D bufctx attached.  'words' is accepted for the
 * caller's space accounting but not used here.  Returns TRUE on success
 * (i.e. nouveau_pushbuf_validate() returned 0).
 */
boolean
nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words)
{
   uint32_t state_mask;
   int ret;
   unsigned i;

   /* Another context may own the channel; take it over first. */
   if (nvc0->screen->cur_ctx != nvc0)
      nvc0_switch_pipe_context(nvc0);

   state_mask = nvc0->dirty & mask;

   if (state_mask) {
      for (i = 0; i < validate_list_len; ++i) {
         struct state_validate *validate = &validate_list[i];

         if (state_mask & validate->states)
            validate->func(nvc0);
      }
      nvc0->dirty &= ~state_mask;

      nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, FALSE);
   }

   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_3d);
   ret = nouveau_pushbuf_validate(nvc0->base.pushbuf);

   /* If validation flushed the push buffer, the old fences were emitted;
    * re-reference the bufctx buffers against the new fence. */
   if (unlikely(nvc0->state.flushed)) {
      nvc0->state.flushed = FALSE;
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, TRUE);
   }
   return !ret;
}