gallium: remove redundant nr_components field from pipe_vertex_element
[mesa.git] / src / gallium / drivers / nv50 / nv50_vbo.c
1 /*
2 * Copyright 2008 Ben Skeggs
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 #include "pipe/p_context.h"
24 #include "pipe/p_state.h"
25 #include "util/u_inlines.h"
26 #include "util/u_format.h"
27
28 #include "nv50_context.h"
29
30 static boolean
31 nv50_push_elements_u08(struct nv50_context *, uint8_t *, unsigned);
32
33 static boolean
34 nv50_push_elements_u16(struct nv50_context *, uint16_t *, unsigned);
35
36 static boolean
37 nv50_push_elements_u32(struct nv50_context *, uint32_t *, unsigned);
38
39 static boolean
40 nv50_push_arrays(struct nv50_context *, unsigned, unsigned);
41
42 #define NV50_USING_LOATHED_EDGEFLAG(ctx) ((ctx)->vertprog->cfg.edgeflag_in < 16)
43
44 static INLINE unsigned
45 nv50_prim(unsigned mode)
46 {
47 switch (mode) {
48 case PIPE_PRIM_POINTS: return NV50TCL_VERTEX_BEGIN_POINTS;
49 case PIPE_PRIM_LINES: return NV50TCL_VERTEX_BEGIN_LINES;
50 case PIPE_PRIM_LINE_LOOP: return NV50TCL_VERTEX_BEGIN_LINE_LOOP;
51 case PIPE_PRIM_LINE_STRIP: return NV50TCL_VERTEX_BEGIN_LINE_STRIP;
52 case PIPE_PRIM_TRIANGLES: return NV50TCL_VERTEX_BEGIN_TRIANGLES;
53 case PIPE_PRIM_TRIANGLE_STRIP:
54 return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP;
55 case PIPE_PRIM_TRIANGLE_FAN: return NV50TCL_VERTEX_BEGIN_TRIANGLE_FAN;
56 case PIPE_PRIM_QUADS: return NV50TCL_VERTEX_BEGIN_QUADS;
57 case PIPE_PRIM_QUAD_STRIP: return NV50TCL_VERTEX_BEGIN_QUAD_STRIP;
58 case PIPE_PRIM_POLYGON: return NV50TCL_VERTEX_BEGIN_POLYGON;
59 case PIPE_PRIM_LINES_ADJACENCY:
60 return NV50TCL_VERTEX_BEGIN_LINES_ADJACENCY;
61 case PIPE_PRIM_LINE_STRIP_ADJACENCY:
62 return NV50TCL_VERTEX_BEGIN_LINE_STRIP_ADJACENCY;
63 case PIPE_PRIM_TRIANGLES_ADJACENCY:
64 return NV50TCL_VERTEX_BEGIN_TRIANGLES_ADJACENCY;
65 case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
66 return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP_ADJACENCY;
67 default:
68 break;
69 }
70
71 NOUVEAU_ERR("invalid primitive type %d\n", mode);
72 return NV50TCL_VERTEX_BEGIN_POINTS;
73 }
74
75 static INLINE uint32_t
76 nv50_vbo_type_to_hw(enum pipe_format format)
77 {
78 const struct util_format_description *desc;
79
80 desc = util_format_description(format);
81 assert(desc);
82
83 switch (desc->channel[0].type) {
84 case UTIL_FORMAT_TYPE_FLOAT:
85 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
86 case UTIL_FORMAT_TYPE_UNSIGNED:
87 if (desc->channel[0].normalized) {
88 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
89 }
90 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
91 case UTIL_FORMAT_TYPE_SIGNED:
92 if (desc->channel[0].normalized) {
93 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
94 }
95 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
96 /*
97 case PIPE_FORMAT_TYPE_UINT:
98 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
99 case PIPE_FORMAT_TYPE_SINT:
100 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
101 default:
102 return 0;
103 }
104 }
105
106 static INLINE uint32_t
107 nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
108 {
109 static const uint32_t hw_values[] = {
110 0, 0, 0, 0,
111 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8,
112 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8,
113 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8,
114 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8,
115 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16,
116 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16,
117 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16,
118 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16,
119 0, 0, 0, 0,
120 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32,
121 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32,
122 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32,
123 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32 };
124
125 /* we'd also have R11G11B10 and R10G10B10A2 */
126
127 assert(nr_c > 0 && nr_c <= 4);
128
129 if (size > 32)
130 return 0;
131 size >>= (3 - 2);
132
133 return hw_values[size + (nr_c - 1)];
134 }
135
/* Translate a pipe_vertex_element's source format into the combined
 * hardware VERTEX_ARRAY_ATTRIB type|size word.  Aborts on formats this
 * translation cannot express.
 */
static INLINE uint32_t
nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
{
	uint32_t hw_type, hw_size;
	enum pipe_format pf = ve->src_format;
	const struct util_format_description *desc;
	unsigned size, nr_components;

	desc = util_format_description(pf);
	assert(desc);

	/* uses the bit size of component 0 for all components */
	size = util_format_get_component_bits(pf, UTIL_FORMAT_COLORSPACE_RGB, 0);
	nr_components = util_format_get_nr_components(pf);

	hw_type = nv50_vbo_type_to_hw(pf);
	hw_size = nv50_vbo_size_to_hw(size, nr_components);

	if (!hw_type || !hw_size) {
		NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf));
		abort();
		return 0x24e80000; /* unreachable after abort(); kept for the compiler */
	}

	/* component 0 sourced from Z means a BGRA-style layout */
	if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) /* BGRA */
		hw_size |= (1 << 31); /* no real swizzle bits :-( */

	return (hw_type | hw_size);
}
164
165 /* For instanced drawing from user buffers, hitting the FIFO repeatedly
166 * with the same vertex data is probably worse than uploading all data.
167 */
/* Copy the contents of a user vertex buffer into the screen-owned
 * scratch buffer strm_vbuf[i] (created/grown on demand) and point
 * nv50->vtxbuf[i] at the copy, so the GPU can read the data.
 * Returns FALSE if the source buffer cannot be mapped.
 */
static boolean
nv50_upload_vtxbuf(struct nv50_context *nv50, unsigned i)
{
	struct nv50_screen *nscreen = nv50->screen;
	struct pipe_screen *pscreen = &nscreen->base.base;
	struct pipe_buffer *buf = nscreen->strm_vbuf[i];
	struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
	uint8_t *src;
	unsigned size = align(vb->buffer->size, 4096);

	/* drop the scratch buffer if it is too small for this source */
	if (buf && buf->size < size)
		pipe_buffer_reference(&nscreen->strm_vbuf[i], NULL);

	if (!nscreen->strm_vbuf[i]) {
		nscreen->strm_vbuf[i] = pipe_buffer_create(
			pscreen, 0, PIPE_BUFFER_USAGE_VERTEX, size);
		buf = nscreen->strm_vbuf[i];
	}

	src = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
	if (!src)
		return FALSE;
	src += vb->buffer_offset;

	/* only copy the range actually addressable via max_index */
	size = (vb->max_index + 1) * vb->stride + 16; /* + 16 is for stride 0 */
	if (vb->buffer_offset + size > vb->buffer->size)
		size = vb->buffer->size - vb->buffer_offset;

	pipe_buffer_write(pscreen, buf, vb->buffer_offset, size, src);
	pipe_buffer_unmap(pscreen, vb->buffer);

	vb->buffer = buf; /* don't pipe_reference, this is a private copy */
	return TRUE;
}
202
203 static void
204 nv50_upload_user_vbufs(struct nv50_context *nv50)
205 {
206 unsigned i;
207
208 if (nv50->vbo_fifo)
209 nv50->dirty |= NV50_NEW_ARRAYS;
210 if (!(nv50->dirty & NV50_NEW_ARRAYS))
211 return;
212
213 for (i = 0; i < nv50->vtxbuf_nr; ++i) {
214 if (nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX)
215 continue;
216 nv50_upload_vtxbuf(nv50, i);
217 }
218 }
219
/* Program vertex attribute i as a constant: decode one element at
 * *data into floats and emit it through the immediate VTX_ATTR_nF
 * method matching the format's component count.
 */
static void
nv50_set_static_vtxattr(struct nv50_context *nv50, unsigned i, void *data)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	float v[4];
	unsigned nr_components = util_format_get_nr_components(nv50->vtxelt[i].src_format);


	util_format_read_4f(nv50->vtxelt[i].src_format,
			    v, 0, data, 0, 0, 0, 1, 1);

	switch (nr_components) {
	case 4:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_4F_X(i), 4);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		OUT_RINGf (chan, v[2]);
		OUT_RINGf (chan, v[3]);
		break;
	case 3:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_3F_X(i), 3);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		OUT_RINGf (chan, v[2]);
		break;
	case 2:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_2F_X(i), 2);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		break;
	case 1:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_1F(i), 1);
		OUT_RINGf (chan, v[0]);
		break;
	default:
		assert(0); /* formats are limited to 1-4 components upstream */
		break;
	}
}
260
/* Immediate (vbo_fifo) path: set up per-instance attributes for the
 * first drawn instance.  For each element with a non-zero instance
 * divisor, record its byte offset (pos[i]) and divisor phase (step[i]),
 * map its buffer if needed, and emit the attribute value.
 * Returns the number of per-instance elements.
 * NOTE(review): pos advances one stride per *instance*, not per divisor
 * period — verify against the instance divisor semantics.
 */
static unsigned
init_per_instance_arrays_immd(struct nv50_context *nv50,
			      unsigned startInstance,
			      unsigned pos[16], unsigned step[16])
{
	struct nouveau_bo *bo;
	unsigned i, b, count = 0;

	for (i = 0; i < nv50->vtxelt_nr; ++i) {
		if (!nv50->vtxelt[i].instance_divisor)
			continue;
		++count;
		b = nv50->vtxelt[i].vertex_buffer_index;

		pos[i] = nv50->vtxelt[i].src_offset +
			nv50->vtxbuf[b].buffer_offset +
			startInstance * nv50->vtxbuf[b].stride;
		step[i] = startInstance % nv50->vtxelt[i].instance_divisor;

		/* buffers stay mapped; step_per_instance_arrays_immd reads them */
		bo = nouveau_bo(nv50->vtxbuf[b].buffer);
		if (!bo->map)
			nouveau_bo_map(bo, NOUVEAU_BO_RD);

		nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
	}

	return count;
}
289
/* Set up per-instance vertex arrays for the first drawn instance.
 * Records per-element byte offsets (pos) and divisor phases (step);
 * when startInstance != 0 the VERTEX_ARRAY_START addresses are
 * re-pointed via a state object kept in state.instbuf so it can be
 * re-emitted on flush.  Delegates to the immediate variant when
 * pushing through the FIFO.
 * Returns the number of elements with a non-zero instance divisor.
 */
static unsigned
init_per_instance_arrays(struct nv50_context *nv50,
			 unsigned startInstance,
			 unsigned pos[16], unsigned step[16])
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	struct nouveau_bo *bo;
	struct nouveau_stateobj *so;
	unsigned i, b, count = 0;
	const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

	if (nv50->vbo_fifo)
		return init_per_instance_arrays_immd(nv50, startInstance,
						     pos, step);

	so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 2, nv50->vtxelt_nr * 2);

	for (i = 0; i < nv50->vtxelt_nr; ++i) {
		if (!nv50->vtxelt[i].instance_divisor)
			continue;
		++count;
		b = nv50->vtxelt[i].vertex_buffer_index;

		/* NOTE(review): advances one stride per instance, not per
		 * divisor period — verify divisor semantics */
		pos[i] = nv50->vtxelt[i].src_offset +
			nv50->vtxbuf[b].buffer_offset +
			startInstance * nv50->vtxbuf[b].stride;

		/* starting at instance 0 needs no start-address override */
		if (!startInstance) {
			step[i] = 0;
			continue;
		}
		step[i] = startInstance % nv50->vtxelt[i].instance_divisor;

		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
	}

	if (count && startInstance) {
		so_ref (so, &nv50->state.instbuf); /* for flush notify */
		so_emit(chan, nv50->state.instbuf);
	}
	so_ref (NULL, &so);

	return count;
}
339
/* Immediate path: advance per-instance attributes by one instance.
 * When an element's divisor phase wraps, step to the next array entry
 * and re-emit its value (buffers were mapped by the init function).
 */
static void
step_per_instance_arrays_immd(struct nv50_context *nv50,
			      unsigned pos[16], unsigned step[16])
{
	struct nouveau_bo *bo;
	unsigned i, b;

	for (i = 0; i < nv50->vtxelt_nr; ++i) {
		if (!nv50->vtxelt[i].instance_divisor)
			continue;
		if (++step[i] != nv50->vtxelt[i].instance_divisor)
			continue;
		b = nv50->vtxelt[i].vertex_buffer_index;
		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		step[i] = 0;
		pos[i] += nv50->vtxbuf[b].stride;

		nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
	}
}
361
/* Advance per-instance vertex arrays by one instance: bump the array
 * start address of each element whose divisor phase wrapped and emit
 * all of them via a state object referenced in state.instbuf (so it
 * gets re-emitted on flush).
 */
static void
step_per_instance_arrays(struct nv50_context *nv50,
			 unsigned pos[16], unsigned step[16])
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	struct nouveau_bo *bo;
	struct nouveau_stateobj *so;
	unsigned i, b;
	const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

	if (nv50->vbo_fifo) {
		step_per_instance_arrays_immd(nv50, pos, step);
		return;
	}

	so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 2, nv50->vtxelt_nr * 2);

	for (i = 0; i < nv50->vtxelt_nr; ++i) {
		if (!nv50->vtxelt[i].instance_divisor)
			continue;
		b = nv50->vtxelt[i].vertex_buffer_index;

		if (++step[i] == nv50->vtxelt[i].instance_divisor) {
			step[i] = 0;
			pos[i] += nv50->vtxbuf[b].stride;
		}

		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
	}

	so_ref (so, &nv50->state.instbuf); /* for flush notify */
	so_ref (NULL, &so);

	so_emit(chan, nv50->state.instbuf);
}
402
403 static INLINE void
404 nv50_unmap_vbufs(struct nv50_context *nv50)
405 {
406 unsigned i;
407
408 for (i = 0; i < nv50->vtxbuf_nr; ++i)
409 if (nouveau_bo(nv50->vtxbuf[i].buffer)->map)
410 nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
411 }
412
/* Draw instanceCount instances of the vertex range [start, start+count),
 * restarting the primitive for every instance and stepping arrays with
 * a non-zero instance divisor in between.
 */
void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
			   unsigned mode, unsigned start, unsigned count,
			   unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	unsigned i, nz_divisors;
	unsigned step[16], pos[16];

	/* with the edgeflag workaround everything goes through the FIFO,
	 * so user buffers need no upload */
	if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
		nv50_upload_user_vbufs(nv50);

	nv50_state_validate(nv50);

	nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);

	/* write startInstance into the auxiliary constant buffer
	 * (presumably read by shaders as the instance ID base — TODO confirm) */
	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING (chan, startInstance);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING (chan, nv50_prim(mode));

	if (nv50->vbo_fifo)
		nv50_push_arrays(nv50, start, count);
	else {
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING (chan, start);
		OUT_RING (chan, count);
	}
	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING (chan, 0);

	for (i = 1; i < instanceCount; i++) {
		if (nz_divisors) /* any non-zero array divisors ? */
			step_per_instance_arrays(nv50, pos, step);

		/* (1 << 28): presumably marks a subsequent instance of the
		 * same draw — NOTE(review): confirm bit meaning */
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING (chan, nv50_prim(mode) | (1 << 28));

		if (nv50->vbo_fifo)
			nv50_push_arrays(nv50, start, count);
		else {
			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
			OUT_RING (chan, start);
			OUT_RING (chan, count);
		}
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING (chan, 0);
	}
	nv50_unmap_vbufs(nv50);

	so_ref(NULL, &nv50->state.instbuf);
}
469
/* Non-indexed, non-instanced draw of [start, start+count).  Uses the
 * hardware vertex fetcher unless vbo_fifo forces pushing vertex data
 * through the FIFO.
 */
void
nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
		 unsigned count)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	boolean ret;

	nv50_state_validate(nv50);

	/* NOTE(review): method 0x142c is written twice with 0; its purpose
	 * is undocumented here */
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING (chan, 0);
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING (chan, 0);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING (chan, nv50_prim(mode));

	if (nv50->vbo_fifo)
		ret = nv50_push_arrays(nv50, start, count);
	else {
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING (chan, start);
		OUT_RING (chan, count);
		ret = TRUE;
	}
	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING (chan, 0);

	nv50_unmap_vbufs(nv50);

	/* XXX: not sure what to do if ret != TRUE: flush and retry?
	 */
	assert(ret);
}
506
/* Emit 8-bit indices map[start .. start+count) inline.  The U16 method
 * packs two indices per dword, so an odd leading index is sent through
 * VB_ELEMENT_U32 first.  Falls back to pushing whole vertices when
 * vbo_fifo is active.
 */
static INLINE boolean
nv50_draw_elements_inline_u08(struct nv50_context *nv50, uint8_t *map,
			      unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;

	map += start;

	if (nv50->vbo_fifo)
		return nv50_push_elements_u08(nv50, map, count);

	/* make the remaining count even so indices pack into dwords */
	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING (chan, map[0]);
		map++;
		count--;
	}

	while (count) {
		/* 2046 indices = 1023 dwords per packet; NOTE(review):
		 * confirm against the pushbuf method-count limit */
		unsigned nr = count > 2046 ? 2046 : count;
		int i;

		BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
		for (i = 0; i < nr; i += 2)
			OUT_RING (chan, (map[i + 1] << 16) | map[i]);

		count -= nr;
		map += nr;
	}
	return TRUE;
}
539
/* Emit 16-bit indices map[start .. start+count) inline, two per dword
 * via the U16 method; an odd leading index goes through VB_ELEMENT_U32.
 * Falls back to pushing whole vertices when vbo_fifo is active.
 */
static INLINE boolean
nv50_draw_elements_inline_u16(struct nv50_context *nv50, uint16_t *map,
			      unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;

	map += start;

	if (nv50->vbo_fifo)
		return nv50_push_elements_u16(nv50, map, count);

	/* make the remaining count even so indices pack into dwords */
	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING (chan, map[0]);
		map++;
		count--;
	}

	while (count) {
		/* 2046 indices = 1023 dwords per packet */
		unsigned nr = count > 2046 ? 2046 : count;
		int i;

		BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
		for (i = 0; i < nr; i += 2)
			OUT_RING (chan, (map[i + 1] << 16) | map[i]);

		count -= nr;
		map += nr;
	}
	return TRUE;
}
572
573 static INLINE boolean
574 nv50_draw_elements_inline_u32(struct nv50_context *nv50, uint32_t *map,
575 unsigned start, unsigned count)
576 {
577 struct nouveau_channel *chan = nv50->screen->tesla->channel;
578 struct nouveau_grobj *tesla = nv50->screen->tesla;
579
580 map += start;
581
582 if (nv50->vbo_fifo)
583 return nv50_push_elements_u32(nv50, map, count);
584
585 while (count) {
586 unsigned nr = count > 2047 ? 2047 : count;
587
588 BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U32, nr);
589 OUT_RINGp (chan, map, nr);
590
591 count -= nr;
592 map += nr;
593 }
594 return TRUE;
595 }
596
597 static INLINE void
598 nv50_draw_elements_inline(struct nv50_context *nv50,
599 void *map, unsigned indexSize,
600 unsigned start, unsigned count)
601 {
602 switch (indexSize) {
603 case 1:
604 nv50_draw_elements_inline_u08(nv50, map, start, count);
605 break;
606 case 2:
607 nv50_draw_elements_inline_u16(nv50, map, start, count);
608 break;
609 case 4:
610 nv50_draw_elements_inline_u32(nv50, map, start, count);
611 break;
612 }
613 }
614
615 void
616 nv50_draw_elements_instanced(struct pipe_context *pipe,
617 struct pipe_buffer *indexBuffer,
618 unsigned indexSize,
619 unsigned mode, unsigned start, unsigned count,
620 unsigned startInstance, unsigned instanceCount)
621 {
622 struct nv50_context *nv50 = nv50_context(pipe);
623 struct nouveau_grobj *tesla = nv50->screen->tesla;
624 struct nouveau_channel *chan = tesla->channel;
625 struct pipe_screen *pscreen = pipe->screen;
626 void *map;
627 unsigned i, nz_divisors;
628 unsigned step[16], pos[16];
629
630 map = pipe_buffer_map(pscreen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ);
631
632 if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
633 nv50_upload_user_vbufs(nv50);
634
635 nv50_state_validate(nv50);
636
637 nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);
638
639 BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
640 OUT_RING (chan, NV50_CB_AUX | (24 << 8));
641 OUT_RING (chan, startInstance);
642
643 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
644 OUT_RING (chan, nv50_prim(mode));
645
646 nv50_draw_elements_inline(nv50, map, indexSize, start, count);
647
648 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
649 OUT_RING (chan, 0);
650
651 for (i = 1; i < instanceCount; ++i) {
652 if (nz_divisors) /* any non-zero array divisors ? */
653 step_per_instance_arrays(nv50, pos, step);
654
655 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
656 OUT_RING (chan, nv50_prim(mode) | (1 << 28));
657
658 nv50_draw_elements_inline(nv50, map, indexSize, start, count);
659
660 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
661 OUT_RING (chan, 0);
662 }
663 nv50_unmap_vbufs(nv50);
664
665 so_ref(NULL, &nv50->state.instbuf);
666 }
667
/* Indexed, non-instanced draw.  GPU-readable 32/16-bit index buffers
 * are submitted in place from the buffer object; everything else is
 * copied inline (or pushed vertex-by-vertex through the FIFO).
 */
void
nv50_draw_elements(struct pipe_context *pipe,
		   struct pipe_buffer *indexBuffer, unsigned indexSize,
		   unsigned mode, unsigned start, unsigned count)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct pipe_screen *pscreen = pipe->screen;
	void *map;

	nv50_state_validate(nv50);

	/* NOTE(review): method 0x142c written twice with 0, purpose
	 * undocumented — matches nv50_draw_arrays */
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING (chan, 0);
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING (chan, 0);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING (chan, nv50_prim(mode));

	if (!nv50->vbo_fifo && indexSize == 4) {
		/* submit the index data directly from the buffer object */
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32 | 0x30000, 0);
		OUT_RING (chan, count);
		nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
				       start << 2, count << 2);
	} else
	if (!nv50->vbo_fifo && indexSize == 2) {
		/* dword-align the submitted window; the SETUP method tells
		 * the hardware whether to skip the first 16-bit index */
		unsigned vb_start = (start & ~1);
		unsigned vb_end = (start + count + 1) & ~1;
		unsigned dwords = (vb_end - vb_start) >> 1;

		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
		OUT_RING (chan, ((start & 1) << 31) | count);
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16 | 0x30000, 0);
		OUT_RING (chan, dwords);
		nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
				       vb_start << 1, dwords << 2);
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
		OUT_RING (chan, 0);
	} else {
		map = pipe_buffer_map(pscreen, indexBuffer,
				      PIPE_BUFFER_USAGE_CPU_READ);
		nv50_draw_elements_inline(nv50, map, indexSize, start, count);
		nv50_unmap_vbufs(nv50);
		pipe_buffer_unmap(pscreen, indexBuffer);
	}

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING (chan, 0);
}
719
/* Try to emit vertex element 'attrib' as a constant attribute, read
 * once from its (stride 0) vertex buffer and appended to *pso (created
 * on demand).  Also programs EDGEFLAG_ENABLE when the element feeds
 * the vertex program's edgeflag input.
 * Returns FALSE if the buffer can't be mapped or the component count
 * is unsupported.
 */
static INLINE boolean
nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
		       struct nouveau_stateobj **pso,
		       struct pipe_vertex_element *ve,
		       struct pipe_vertex_buffer *vb)

{
	struct nouveau_stateobj *so;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_bo *bo = nouveau_bo(vb->buffer);
	float v[4];
	int ret;
	unsigned nr_components = util_format_get_nr_components(ve->src_format);

	ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
	if (ret)
		return FALSE;

	util_format_read_4f(ve->src_format, v, 0, (uint8_t *)bo->map +
			    (vb->buffer_offset + ve->src_offset), 0,
			    0, 0, 1, 1);
	so = *pso;
	if (!so)
		*pso = so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 4, 0);

	switch (nr_components) {
	case 4:
		so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
		so_data (so, fui(v[0]));
		so_data (so, fui(v[1]));
		so_data (so, fui(v[2]));
		so_data (so, fui(v[3]));
		break;
	case 3:
		so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
		so_data (so, fui(v[0]));
		so_data (so, fui(v[1]));
		so_data (so, fui(v[2]));
		break;
	case 2:
		so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
		so_data (so, fui(v[0]));
		so_data (so, fui(v[1]));
		break;
	case 1:
		/* single-component element feeding the edgeflag input also
		 * toggles the edgeflag state */
		if (attrib == nv50->vertprog->cfg.edgeflag_in) {
			so_method(so, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
			so_data (so, v[0] ? 1 : 0);
		}
		so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
		so_data (so, fui(v[0]));
		break;
	default:
		nouveau_bo_unmap(bo);
		return FALSE;
	}

	nouveau_bo_unmap(bo);
	return TRUE;
}
780
/* Build the vertex array state objects (vtxfmt, vtxbuf, vtxattr) from
 * the current vertex elements and buffers and reference them in
 * nv50->state.  Also decides which elements must be pushed through the
 * FIFO instead of fetched by the hardware (nv50->vbo_fifo bitmask).
 */
void
nv50_vbo_validate(struct nv50_context *nv50)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
	unsigned i, n_ve;

	/* don't validate if Gallium took away our buffers */
	if (nv50->vtxbuf_nr == 0)
		return;
	nv50->vbo_fifo = 0;

	/* buffers without VERTEX usage can't be fetched by the GPU */
	for (i = 0; i < nv50->vtxbuf_nr; ++i)
		if (nv50->vtxbuf[i].stride &&
		    !(nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX))
			nv50->vbo_fifo = 0xffff;

	if (NV50_USING_LOATHED_EDGEFLAG(nv50))
		nv50->vbo_fifo = 0xffff; /* vertprog can't set edgeflag */

	/* also cover elements left over from the previous state so their
	 * arrays get disabled below */
	n_ve = MAX2(nv50->vtxelt_nr, nv50->state.vtxelt_nr);

	vtxattr = NULL;
	vtxbuf = so_new(n_ve * 2, n_ve * 5, nv50->vtxelt_nr * 4);
	vtxfmt = so_new(1, n_ve, 0);
	so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve);

	for (i = 0; i < nv50->vtxelt_nr; i++) {
		struct pipe_vertex_element *ve = &nv50->vtxelt[i];
		struct pipe_vertex_buffer *vb =
			&nv50->vtxbuf[ve->vertex_buffer_index];
		struct nouveau_bo *bo = nouveau_bo(vb->buffer);
		uint32_t hw = nv50_vbo_vtxelt_to_hw(ve);

		/* stride 0: emit as a constant attribute, keep the hardware
		 * array disabled */
		if (!vb->stride &&
		    nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
			so_data(vtxfmt, hw | (1 << 4));

			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data (vtxbuf, 0);

			nv50->vbo_fifo &= ~(1 << i);
			continue;
		}

		/* FIFO mode: data is pushed manually, arrays stay disabled */
		if (nv50->vbo_fifo) {
			so_data (vtxfmt, hw |
				 (ve->instance_divisor ? (1 << 4) : i));
			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data (vtxbuf, 0);
			continue;
		}
		so_data(vtxfmt, hw | i);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
		so_data (vtxbuf, 0x20000000 |
			 (ve->instance_divisor ? 0 : vb->stride));
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);

		/* vertex array limits */
		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i), 2);
		so_reloc (vtxbuf, bo, vb->buffer->size - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer->size - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_LOW, 0, 0);
	}
	/* disable any arrays still active from the previous state */
	for (; i < n_ve; ++i) {
		so_data (vtxfmt, 0x7e080010);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
		so_data (vtxbuf, 0);
	}
	nv50->state.vtxelt_nr = nv50->vtxelt_nr;

	so_ref (vtxfmt, &nv50->state.vtxfmt);
	so_ref (vtxbuf, &nv50->state.vtxbuf);
	so_ref (vtxattr, &nv50->state.vtxattr);
	so_ref (NULL, &vtxbuf);
	so_ref (NULL, &vtxfmt);
	so_ref (NULL, &vtxattr);
}
871
/* Helper that writes one vertex element's data into the FIFO. */
typedef void (*pfn_push)(struct nouveau_channel *, void *);

/* State for pushing vertex data through the FIFO (immediate mode);
 * filled in by emit_prepare().  Arrays are indexed by emit slot
 * (0 .. nr_ve-1), in vertex element order.
 */
struct nv50_vbo_emitctx
{
	pfn_push push[16];    /* per-element emit callback */
	uint8_t *map[16];     /* current read pointer into the mapped buffer */
	unsigned stride[16];  /* byte distance between consecutive vertices */
	unsigned nr_ve;       /* number of elements pushed per vertex */
	unsigned vtx_dwords;  /* FIFO dwords emitted per vertex */
	unsigned vtx_max;     /* max vertices per VERTEX_DATA packet */

	float edgeflag;       /* last edgeflag value sent (starts at 0.5f) */
	unsigned ve_edgeflag; /* edgeflag attrib index, >= 16 if unused */
};
886
887 static INLINE void
888 emit_vtx_next(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit)
889 {
890 unsigned i;
891
892 for (i = 0; i < emit->nr_ve; ++i) {
893 emit->push[i](chan, emit->map[i]);
894 emit->map[i] += emit->stride[i];
895 }
896 }
897
898 static INLINE void
899 emit_vtx(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit,
900 uint32_t vi)
901 {
902 unsigned i;
903
904 for (i = 0; i < emit->nr_ve; ++i)
905 emit->push[i](chan, emit->map[i] + emit->stride[i] * vi);
906 }
907
908 static INLINE boolean
909 nv50_map_vbufs(struct nv50_context *nv50)
910 {
911 int i;
912
913 for (i = 0; i < nv50->vtxbuf_nr; ++i) {
914 struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
915 unsigned size = vb->stride * (vb->max_index + 1) + 16;
916
917 if (nouveau_bo(vb->buffer)->map)
918 continue;
919
920 size = vb->stride * (vb->max_index + 1) + 16;
921 size = MIN2(size, vb->buffer->size);
922 if (!size)
923 size = vb->buffer->size;
924
925 if (nouveau_bo_map_range(nouveau_bo(vb->buffer),
926 0, size, NOUVEAU_BO_RD))
927 break;
928 }
929
930 if (i == nv50->vtxbuf_nr)
931 return TRUE;
932 for (; i >= 0; --i)
933 nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
934 return FALSE;
935 }
936
/* Push one 32-bit component. */
static void
emit_b32_1(struct nouveau_channel *chan, void *data)
{
	OUT_RING(chan, ((uint32_t *)data)[0]);
}
944
/* Push two 32-bit components. */
static void
emit_b32_2(struct nouveau_channel *chan, void *data)
{
	uint32_t *c = data;
	int k;

	for (k = 0; k < 2; ++k)
		OUT_RING(chan, c[k]);
}
953
/* Push three 32-bit components. */
static void
emit_b32_3(struct nouveau_channel *chan, void *data)
{
	uint32_t *c = data;
	int k;

	for (k = 0; k < 3; ++k)
		OUT_RING(chan, c[k]);
}
963
/* Push four 32-bit components. */
static void
emit_b32_4(struct nouveau_channel *chan, void *data)
{
	uint32_t *c = data;
	int k;

	for (k = 0; k < 4; ++k)
		OUT_RING(chan, c[k]);
}
974
/* Push one 16-bit component (zero-extended into a dword). */
static void
emit_b16_1(struct nouveau_channel *chan, void *data)
{
	OUT_RING(chan, ((uint16_t *)data)[0]);
}
982
/* Push three 16-bit components: two packed into the first dword, the
 * third zero-extended into the second. */
static void
emit_b16_3(struct nouveau_channel *chan, void *data)
{
	uint16_t *c = data;
	uint32_t packed = c[0] | ((uint32_t)c[1] << 16);

	OUT_RING(chan, packed);
	OUT_RING(chan, c[2]);
}
991
/* Push one 8-bit component (zero-extended into a dword). */
static void
emit_b08_1(struct nouveau_channel *chan, void *data)
{
	OUT_RING(chan, ((uint8_t *)data)[0]);
}
999
/* Push three 8-bit components packed into a single dword. */
static void
emit_b08_3(struct nouveau_channel *chan, void *data)
{
	uint8_t *c = data;

	OUT_RING(chan, c[0] | ((uint32_t)c[1] << 8) | ((uint32_t)c[2] << 16));
}
1007
1008 static boolean
1009 emit_prepare(struct nv50_context *nv50, struct nv50_vbo_emitctx *emit,
1010 unsigned start)
1011 {
1012 unsigned i;
1013
1014 if (nv50_map_vbufs(nv50) == FALSE)
1015 return FALSE;
1016
1017 emit->ve_edgeflag = nv50->vertprog->cfg.edgeflag_in;
1018
1019 emit->edgeflag = 0.5f;
1020 emit->nr_ve = 0;
1021 emit->vtx_dwords = 0;
1022
1023 for (i = 0; i < nv50->vtxelt_nr; ++i) {
1024 struct pipe_vertex_element *ve;
1025 struct pipe_vertex_buffer *vb;
1026 unsigned n, size, nr_components;
1027 const struct util_format_description *desc;
1028
1029 ve = &nv50->vtxelt[i];
1030 vb = &nv50->vtxbuf[ve->vertex_buffer_index];
1031 if (!(nv50->vbo_fifo & (1 << i)) || ve->instance_divisor)
1032 continue;
1033 n = emit->nr_ve++;
1034
1035 emit->stride[n] = vb->stride;
1036 emit->map[n] = (uint8_t *)nouveau_bo(vb->buffer)->map +
1037 vb->buffer_offset +
1038 (start * vb->stride + ve->src_offset);
1039
1040 desc = util_format_description(ve->src_format);
1041 assert(desc);
1042
1043 size = util_format_get_component_bits(
1044 ve->src_format, UTIL_FORMAT_COLORSPACE_RGB, 0);
1045 nr_components = util_format_get_nr_components(ve->src_format);
1046
1047 assert(nr_components > 0 && nr_components <= 4);
1048
1049 /* It shouldn't be necessary to push the implicit 1s
1050 * for case 3 and size 8 cases 1, 2, 3.
1051 */
1052 switch (size) {
1053 default:
1054 NOUVEAU_ERR("unsupported vtxelt size: %u\n", size);
1055 return FALSE;
1056 case 32:
1057 switch (nr_components) {
1058 case 1: emit->push[n] = emit_b32_1; break;
1059 case 2: emit->push[n] = emit_b32_2; break;
1060 case 3: emit->push[n] = emit_b32_3; break;
1061 case 4: emit->push[n] = emit_b32_4; break;
1062 }
1063 emit->vtx_dwords += nr_components;
1064 break;
1065 case 16:
1066 switch (nr_components) {
1067 case 1: emit->push[n] = emit_b16_1; break;
1068 case 2: emit->push[n] = emit_b32_1; break;
1069 case 3: emit->push[n] = emit_b16_3; break;
1070 case 4: emit->push[n] = emit_b32_2; break;
1071 }
1072 emit->vtx_dwords += (nr_components + 1) >> 1;
1073 break;
1074 case 8:
1075 switch (nr_components) {
1076 case 1: emit->push[n] = emit_b08_1; break;
1077 case 2: emit->push[n] = emit_b16_1; break;
1078 case 3: emit->push[n] = emit_b08_3; break;
1079 case 4: emit->push[n] = emit_b32_1; break;
1080 }
1081 emit->vtx_dwords += 1;
1082 break;
1083 }
1084 }
1085
1086 emit->vtx_max = 512 / emit->vtx_dwords;
1087 if (emit->ve_edgeflag < 16)
1088 emit->vtx_max = 1;
1089
1090 return TRUE;
1091 }
1092
/* If the vertex program reads an edgeflag attribute, fetch its float
 * value for vertex 'index' and program method 0x15e4 when it changed.
 * NOTE(review): emit->map[] is filled per emit *slot* in emit_prepare,
 * while i here is the vertex-attrib index from vertprog->cfg — verify
 * these index spaces coincide for the edgeflag element.
 */
static INLINE void
set_edgeflag(struct nouveau_channel *chan,
	     struct nouveau_grobj *tesla,
	     struct nv50_vbo_emitctx *emit, uint32_t index)
{
	unsigned i = emit->ve_edgeflag;

	/* >= 16 means the vertex program has no edgeflag input */
	if (i < 16) {
		float f = *((float *)(emit->map[i] + index * emit->stride[i]));

		if (emit->edgeflag != f) {
			emit->edgeflag = f;

			BEGIN_RING(chan, tesla, 0x15e4, 1);
			OUT_RING (chan, f ? 1 : 0);
		}
	}
}
1111
1112 static boolean
1113 nv50_push_arrays(struct nv50_context *nv50, unsigned start, unsigned count)
1114 {
1115 struct nouveau_channel *chan = nv50->screen->base.channel;
1116 struct nouveau_grobj *tesla = nv50->screen->tesla;
1117 struct nv50_vbo_emitctx emit;
1118
1119 if (emit_prepare(nv50, &emit, start) == FALSE)
1120 return FALSE;
1121
1122 while (count) {
1123 unsigned i, dw, nr = MIN2(count, emit.vtx_max);
1124 dw = nr * emit.vtx_dwords;
1125
1126 set_edgeflag(chan, tesla, &emit, 0); /* nr will be 1 */
1127
1128 BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
1129 for (i = 0; i < nr; ++i)
1130 emit_vtx_next(chan, &emit);
1131
1132 count -= nr;
1133 }
1134
1135 return TRUE;
1136 }
1137
1138 static boolean
1139 nv50_push_elements_u32(struct nv50_context *nv50, uint32_t *map, unsigned count)
1140 {
1141 struct nouveau_channel *chan = nv50->screen->base.channel;
1142 struct nouveau_grobj *tesla = nv50->screen->tesla;
1143 struct nv50_vbo_emitctx emit;
1144
1145 if (emit_prepare(nv50, &emit, 0) == FALSE)
1146 return FALSE;
1147
1148 while (count) {
1149 unsigned i, dw, nr = MIN2(count, emit.vtx_max);
1150 dw = nr * emit.vtx_dwords;
1151
1152 set_edgeflag(chan, tesla, &emit, *map);
1153
1154 BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
1155 for (i = 0; i < nr; ++i)
1156 emit_vtx(chan, &emit, *map++);
1157
1158 count -= nr;
1159 }
1160
1161 return TRUE;
1162 }
1163
1164 static boolean
1165 nv50_push_elements_u16(struct nv50_context *nv50, uint16_t *map, unsigned count)
1166 {
1167 struct nouveau_channel *chan = nv50->screen->base.channel;
1168 struct nouveau_grobj *tesla = nv50->screen->tesla;
1169 struct nv50_vbo_emitctx emit;
1170
1171 if (emit_prepare(nv50, &emit, 0) == FALSE)
1172 return FALSE;
1173
1174 while (count) {
1175 unsigned i, dw, nr = MIN2(count, emit.vtx_max);
1176 dw = nr * emit.vtx_dwords;
1177
1178 set_edgeflag(chan, tesla, &emit, *map);
1179
1180 BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
1181 for (i = 0; i < nr; ++i)
1182 emit_vtx(chan, &emit, *map++);
1183
1184 count -= nr;
1185 }
1186
1187 return TRUE;
1188 }
1189
1190 static boolean
1191 nv50_push_elements_u08(struct nv50_context *nv50, uint8_t *map, unsigned count)
1192 {
1193 struct nouveau_channel *chan = nv50->screen->base.channel;
1194 struct nouveau_grobj *tesla = nv50->screen->tesla;
1195 struct nv50_vbo_emitctx emit;
1196
1197 if (emit_prepare(nv50, &emit, 0) == FALSE)
1198 return FALSE;
1199
1200 while (count) {
1201 unsigned i, dw, nr = MIN2(count, emit.vtx_max);
1202 dw = nr * emit.vtx_dwords;
1203
1204 set_edgeflag(chan, tesla, &emit, *map);
1205
1206 BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
1207 for (i = 0; i < nr; ++i)
1208 emit_vtx(chan, &emit, *map++);
1209
1210 count -= nr;
1211 }
1212
1213 return TRUE;
1214 }