winsys/drm: Handle circular dependencies in Makefile.egl.
[mesa.git] / src / gallium / drivers / nv50 / nv50_vbo.c
1 /*
2 * Copyright 2008 Ben Skeggs
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "nv50_context.h"
29
/* Forward declarations for the immediate-mode (FIFO push) vertex paths
 * implemented at the bottom of this file.
 */
static boolean
nv50_push_elements_u08(struct nv50_context *, uint8_t *, unsigned);

static boolean
nv50_push_elements_u16(struct nv50_context *, uint16_t *, unsigned);

static boolean
nv50_push_elements_u32(struct nv50_context *, uint32_t *, unsigned);

static boolean
nv50_push_arrays(struct nv50_context *, unsigned, unsigned);

/* TRUE when the vertex program reads the edgeflag from a generic input
 * (slot < 16); that forces the FIFO push path (see nv50_vbo_validate,
 * which notes "vertprog can't set edgeflag"). */
#define NV50_USING_LOATHED_EDGEFLAG(ctx) ((ctx)->vertprog->cfg.edgeflag_in < 16)
43
44 static INLINE unsigned
45 nv50_prim(unsigned mode)
46 {
47 switch (mode) {
48 case PIPE_PRIM_POINTS: return NV50TCL_VERTEX_BEGIN_POINTS;
49 case PIPE_PRIM_LINES: return NV50TCL_VERTEX_BEGIN_LINES;
50 case PIPE_PRIM_LINE_LOOP: return NV50TCL_VERTEX_BEGIN_LINE_LOOP;
51 case PIPE_PRIM_LINE_STRIP: return NV50TCL_VERTEX_BEGIN_LINE_STRIP;
52 case PIPE_PRIM_TRIANGLES: return NV50TCL_VERTEX_BEGIN_TRIANGLES;
53 case PIPE_PRIM_TRIANGLE_STRIP:
54 return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP;
55 case PIPE_PRIM_TRIANGLE_FAN: return NV50TCL_VERTEX_BEGIN_TRIANGLE_FAN;
56 case PIPE_PRIM_QUADS: return NV50TCL_VERTEX_BEGIN_QUADS;
57 case PIPE_PRIM_QUAD_STRIP: return NV50TCL_VERTEX_BEGIN_QUAD_STRIP;
58 case PIPE_PRIM_POLYGON: return NV50TCL_VERTEX_BEGIN_POLYGON;
59 case PIPE_PRIM_LINES_ADJACENCY:
60 return NV50TCL_VERTEX_BEGIN_LINES_ADJACENCY;
61 case PIPE_PRIM_LINE_STRIP_ADJACENCY:
62 return NV50TCL_VERTEX_BEGIN_LINE_STRIP_ADJACENCY;
63 case PIPE_PRIM_TRIANGLES_ADJACENCY:
64 return NV50TCL_VERTEX_BEGIN_TRIANGLES_ADJACENCY;
65 case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
66 return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP_ADJACENCY;
67 default:
68 break;
69 }
70
71 NOUVEAU_ERR("invalid primitive type %d\n", mode);
72 return NV50TCL_VERTEX_BEGIN_POINTS;
73 }
74
75 static INLINE uint32_t
76 nv50_vbo_type_to_hw(enum pipe_format format)
77 {
78 const struct util_format_description *desc;
79
80 desc = util_format_description(format);
81 assert(desc);
82
83 switch (desc->channel[0].type) {
84 case UTIL_FORMAT_TYPE_FLOAT:
85 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
86 case UTIL_FORMAT_TYPE_UNSIGNED:
87 if (desc->channel[0].normalized) {
88 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
89 }
90 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
91 case UTIL_FORMAT_TYPE_SIGNED:
92 if (desc->channel[0].normalized) {
93 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
94 }
95 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
96 /*
97 case PIPE_FORMAT_TYPE_UINT:
98 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
99 case PIPE_FORMAT_TYPE_SINT:
100 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
101 default:
102 return 0;
103 }
104 }
105
/* Map a per-component bit size (8/16/32) and component count (1-4) to
 * the hardware VERTEX_ARRAY_ATTRIB_FORMAT bits; returns 0 for
 * unsupported combinations.
 */
static INLINE uint32_t
nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
{
        /* Indexed by (size >> 1) + (nr_c - 1): rows of four entries at
         * offsets 4 (8-bit), 8 (16-bit) and 16 (32-bit); the zero
         * entries are invalid size/count combinations. */
        static const uint32_t hw_values[] = {
                0, 0, 0, 0,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16,
                0, 0, 0, 0,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32,
                NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32 };

        /* we'd also have R11G11B10 and R10G10B10A2 */

        assert(nr_c > 0 && nr_c <= 4);

        if (size > 32)
                return 0;
        size >>= (3 - 2); /* halve: 8 -> 4, 16 -> 8, 32 -> 16 (row base) */

        return hw_values[size + (nr_c - 1)];
}
135
136 static INLINE uint32_t
137 nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
138 {
139 uint32_t hw_type, hw_size;
140 enum pipe_format pf = ve->src_format;
141 const struct util_format_description *desc;
142 unsigned size;
143
144 desc = util_format_description(pf);
145 assert(desc);
146
147 size = util_format_get_component_bits(pf, UTIL_FORMAT_COLORSPACE_RGB, 0);
148
149 hw_type = nv50_vbo_type_to_hw(pf);
150 hw_size = nv50_vbo_size_to_hw(size, ve->nr_components);
151
152 if (!hw_type || !hw_size) {
153 NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf));
154 abort();
155 return 0x24e80000;
156 }
157
158 if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) /* BGRA */
159 hw_size |= (1 << 31); /* no real swizzle bits :-( */
160
161 return (hw_type | hw_size);
162 }
163
/* For instanced drawing from user buffers, hitting the FIFO repeatedly
 * with the same vertex data is probably worse than uploading all data.
 */
/* Copy user vertex buffer i into a (cached, page-aligned) GPU-usable
 * buffer in nscreen->strm_vbuf[] and repoint nv50->vtxbuf[i].buffer at
 * the copy. Returns FALSE if the source buffer could not be mapped.
 */
static boolean
nv50_upload_vtxbuf(struct nv50_context *nv50, unsigned i)
{
        struct nv50_screen *nscreen = nv50->screen;
        struct pipe_screen *pscreen = &nscreen->base.base;
        struct pipe_buffer *buf = nscreen->strm_vbuf[i];
        struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
        uint8_t *src;
        /* round up so the cached copy can be reused across small growth */
        unsigned size = align(vb->buffer->size, 4096);

        /* cached buffer too small: drop it, a new one is made below */
        if (buf && buf->size < size)
                pipe_buffer_reference(&nscreen->strm_vbuf[i], NULL);

        if (!nscreen->strm_vbuf[i]) {
                nscreen->strm_vbuf[i] = pipe_buffer_create(
                        pscreen, 0, PIPE_BUFFER_USAGE_VERTEX, size);
                buf = nscreen->strm_vbuf[i];
        }

        src = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
        if (!src)
                return FALSE;
        src += vb->buffer_offset;

        /* only copy the range the draw can actually address */
        size = (vb->max_index + 1) * vb->stride + 16; /* + 16 is for stride 0 */
        if (vb->buffer_offset + size > vb->buffer->size)
                size = vb->buffer->size - vb->buffer_offset;

        pipe_buffer_write(pscreen, buf, vb->buffer_offset, size, src);
        pipe_buffer_unmap(pscreen, vb->buffer);

        vb->buffer = buf; /* don't pipe_reference, this is a private copy */
        return TRUE;
}
201
202 static void
203 nv50_upload_user_vbufs(struct nv50_context *nv50)
204 {
205 unsigned i;
206
207 if (nv50->vbo_fifo)
208 nv50->dirty |= NV50_NEW_ARRAYS;
209 if (!(nv50->dirty & NV50_NEW_ARRAYS))
210 return;
211
212 for (i = 0; i < nv50->vtxbuf_nr; ++i) {
213 if (nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX)
214 continue;
215 nv50_upload_vtxbuf(nv50, i);
216 }
217 }
218
/* Read one vertex from @data using element i's format and emit it as a
 * constant vertex attribute through the immediate VTX_ATTR_nF methods.
 */
static void
nv50_set_static_vtxattr(struct nv50_context *nv50, unsigned i, void *data)
{
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nouveau_channel *chan = tesla->channel;
        float v[4];

        /* decode a single vertex (1x1 block at offset 0) into floats */
        util_format_read_4f(nv50->vtxelt[i].src_format,
                            v, 0, data, 0, 0, 0, 1, 1);

        switch (nv50->vtxelt[i].nr_components) {
        case 4:
                BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_4F_X(i), 4);
                OUT_RINGf (chan, v[0]);
                OUT_RINGf (chan, v[1]);
                OUT_RINGf (chan, v[2]);
                OUT_RINGf (chan, v[3]);
                break;
        case 3:
                BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_3F_X(i), 3);
                OUT_RINGf (chan, v[0]);
                OUT_RINGf (chan, v[1]);
                OUT_RINGf (chan, v[2]);
                break;
        case 2:
                BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_2F_X(i), 2);
                OUT_RINGf (chan, v[0]);
                OUT_RINGf (chan, v[1]);
                break;
        case 1:
                BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_1F(i), 1);
                OUT_RINGf (chan, v[0]);
                break;
        default:
                assert(0);
                break;
        }
}
257
/* Immediate-mode variant of init_per_instance_arrays: emit each
 * per-instance element's value for startInstance as a static attribute.
 * Fills pos[] (byte offset of the current value within its buffer) and
 * step[] (phase within the instance divisor) for the stepping functions
 * and returns the number of per-instance elements found.
 */
static unsigned
init_per_instance_arrays_immd(struct nv50_context *nv50,
                              unsigned startInstance,
                              unsigned pos[16], unsigned step[16])
{
        struct nouveau_bo *bo;
        unsigned i, b, count = 0;

        for (i = 0; i < nv50->vtxelt_nr; ++i) {
                if (!nv50->vtxelt[i].instance_divisor)
                        continue;
                ++count;
                b = nv50->vtxelt[i].vertex_buffer_index;

                /* NOTE(review): uses startInstance * stride, not
                 * (startInstance / divisor) * stride — verify this is
                 * right for divisors > 1 with startInstance != 0. */
                pos[i] = nv50->vtxelt[i].src_offset +
                        nv50->vtxbuf[b].buffer_offset +
                        startInstance * nv50->vtxbuf[b].stride;
                step[i] = startInstance % nv50->vtxelt[i].instance_divisor;

                bo = nouveau_bo(nv50->vtxbuf[b].buffer);
                /* NOTE(review): map failure is not checked; a NULL map
                 * would crash in nv50_set_static_vtxattr below. */
                if (!bo->map)
                        nouveau_bo_map(bo, NOUVEAU_BO_RD);

                nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
        }

        return count;
}
286
/* Set up per-instance (instance_divisor != 0) vertex arrays for the
 * first drawn instance. Fills pos[]/step[] for later stepping and
 * returns the number of per-instance elements. On the push path this
 * defers to the immediate-mode variant. For startInstance != 0 the
 * adjusted array start addresses are emitted and the state object is
 * kept in nv50->state.instbuf so flush notification can re-emit it.
 */
static unsigned
init_per_instance_arrays(struct nv50_context *nv50,
                         unsigned startInstance,
                         unsigned pos[16], unsigned step[16])
{
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nouveau_channel *chan = tesla->channel;
        struct nouveau_bo *bo;
        struct nouveau_stateobj *so;
        unsigned i, b, count = 0;
        const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

        if (nv50->vbo_fifo)
                return init_per_instance_arrays_immd(nv50, startInstance,
                                                     pos, step);

        so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 2, nv50->vtxelt_nr * 2);

        for (i = 0; i < nv50->vtxelt_nr; ++i) {
                if (!nv50->vtxelt[i].instance_divisor)
                        continue;
                ++count;
                b = nv50->vtxelt[i].vertex_buffer_index;

                pos[i] = nv50->vtxelt[i].src_offset +
                        nv50->vtxbuf[b].buffer_offset +
                        startInstance * nv50->vtxbuf[b].stride;

                /* instance 0 starts at the validated address already */
                if (!startInstance) {
                        step[i] = 0;
                        continue;
                }
                step[i] = startInstance % nv50->vtxelt[i].instance_divisor;

                bo = nouveau_bo(nv50->vtxbuf[b].buffer);

                so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
                so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
                so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
        }

        if (count && startInstance) {
                so_ref (so, &nv50->state.instbuf); /* for flush notify */
                so_emit(chan, nv50->state.instbuf);
        }
        so_ref (NULL, &so);

        return count;
}
336
/* Immediate-mode variant of step_per_instance_arrays: advance the
 * divisor phase of every per-instance element and re-emit the static
 * attribute for elements whose divisor period elapsed. Buffers were
 * mapped by init_per_instance_arrays_immd.
 */
static void
step_per_instance_arrays_immd(struct nv50_context *nv50,
                              unsigned pos[16], unsigned step[16])
{
        struct nouveau_bo *bo;
        unsigned i, b;

        for (i = 0; i < nv50->vtxelt_nr; ++i) {
                if (!nv50->vtxelt[i].instance_divisor)
                        continue;
                /* only update once per divisor instances */
                if (++step[i] != nv50->vtxelt[i].instance_divisor)
                        continue;
                b = nv50->vtxelt[i].vertex_buffer_index;
                bo = nouveau_bo(nv50->vtxbuf[b].buffer);

                step[i] = 0;
                pos[i] += nv50->vtxbuf[b].stride;

                nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
        }
}
358
/* Advance per-instance arrays to the next instance: bump each element's
 * divisor phase, moving its array start by one stride whenever the
 * divisor period elapses, and emit the new start addresses. The state
 * object is kept in nv50->state.instbuf for flush notification.
 */
static void
step_per_instance_arrays(struct nv50_context *nv50,
                         unsigned pos[16], unsigned step[16])
{
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nouveau_channel *chan = tesla->channel;
        struct nouveau_bo *bo;
        struct nouveau_stateobj *so;
        unsigned i, b;
        const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

        if (nv50->vbo_fifo) {
                step_per_instance_arrays_immd(nv50, pos, step);
                return;
        }

        so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 2, nv50->vtxelt_nr * 2);

        for (i = 0; i < nv50->vtxelt_nr; ++i) {
                if (!nv50->vtxelt[i].instance_divisor)
                        continue;
                b = nv50->vtxelt[i].vertex_buffer_index;

                if (++step[i] == nv50->vtxelt[i].instance_divisor) {
                        step[i] = 0;
                        pos[i] += nv50->vtxbuf[b].stride;
                }

                bo = nouveau_bo(nv50->vtxbuf[b].buffer);

                /* start address is re-emitted even when unchanged */
                so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
                so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
                so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
        }

        so_ref (so, &nv50->state.instbuf); /* for flush notify */
        so_ref (NULL, &so);

        so_emit(chan, nv50->state.instbuf);
}
399
400 static INLINE void
401 nv50_unmap_vbufs(struct nv50_context *nv50)
402 {
403 unsigned i;
404
405 for (i = 0; i < nv50->vtxbuf_nr; ++i)
406 if (nouveau_bo(nv50->vtxbuf[i].buffer)->map)
407 nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
408 }
409
/* Draw non-indexed primitives with instancing. Uploads user buffers
 * (unless the edgeflag forces the push path), programs per-instance
 * arrays, then replays the draw instanceCount times, stepping the
 * per-instance arrays between instances.
 */
void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
                           unsigned mode, unsigned start, unsigned count,
                           unsigned startInstance, unsigned instanceCount)
{
        struct nv50_context *nv50 = nv50_context(pipe);
        struct nouveau_channel *chan = nv50->screen->tesla->channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        unsigned i, nz_divisors;
        unsigned step[16], pos[16];

        if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
                nv50_upload_user_vbufs(nv50);

        nv50_state_validate(nv50);

        nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);

        /* write startInstance into the aux constant buffer (offset 24) */
        BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
        OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
        OUT_RING  (chan, startInstance);

        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
        OUT_RING  (chan, nv50_prim(mode));

        if (nv50->vbo_fifo)
                nv50_push_arrays(nv50, start, count);
        else {
                BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
                OUT_RING  (chan, start);
                OUT_RING  (chan, count);
        }
        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
        OUT_RING  (chan, 0);

        for (i = 1; i < instanceCount; i++) {
                if (nz_divisors) /* any non-zero array divisors ? */
                        step_per_instance_arrays(nv50, pos, step);

                /* NOTE(review): (1 << 28) appears to mark "subsequent
                 * instance" on VERTEX_BEGIN — confirm against docs. */
                BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
                OUT_RING  (chan, nv50_prim(mode) | (1 << 28));

                if (nv50->vbo_fifo)
                        nv50_push_arrays(nv50, start, count);
                else {
                        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
                        OUT_RING  (chan, start);
                        OUT_RING  (chan, count);
                }
                BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
                OUT_RING  (chan, 0);
        }
        nv50_unmap_vbufs(nv50);

        so_ref(NULL, &nv50->state.instbuf);
}
466
/* Draw non-indexed, non-instanced primitives, either from bound vertex
 * arrays or via the FIFO push path when nv50->vbo_fifo is set.
 */
void
nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
                 unsigned count)
{
        struct nv50_context *nv50 = nv50_context(pipe);
        struct nouveau_channel *chan = nv50->screen->tesla->channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        boolean ret;

        nv50_state_validate(nv50);

        /* NOTE(review): method 0x142c is undocumented here and written
         * twice on purpose — looks like a serialization workaround;
         * confirm before changing. */
        BEGIN_RING(chan, tesla, 0x142c, 1);
        OUT_RING  (chan, 0);
        BEGIN_RING(chan, tesla, 0x142c, 1);
        OUT_RING  (chan, 0);

        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
        OUT_RING  (chan, nv50_prim(mode));

        if (nv50->vbo_fifo)
                ret = nv50_push_arrays(nv50, start, count);
        else {
                BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
                OUT_RING  (chan, start);
                OUT_RING  (chan, count);
                ret = TRUE;
        }
        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
        OUT_RING  (chan, 0);

        nv50_unmap_vbufs(nv50);

        /* XXX: not sure what to do if ret != TRUE: flush and retry?
         */
        assert(ret);
}
503
/* Emit 8-bit indices map[start .. start+count) inline. A leading odd
 * index goes out as a single U32 element so the rest can be packed two
 * per dword into VB_ELEMENT_U16 (at most 2046 indices per packet,
 * keeping the packed count even).
 */
static INLINE boolean
nv50_draw_elements_inline_u08(struct nv50_context *nv50, uint8_t *map,
                              unsigned start, unsigned count)
{
        struct nouveau_channel *chan = nv50->screen->tesla->channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;

        map += start;

        if (nv50->vbo_fifo)
                return nv50_push_elements_u08(nv50, map, count);

        /* peel one index so the remaining count is even */
        if (count & 1) {
                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
                OUT_RING  (chan, map[0]);
                map++;
                count--;
        }

        while (count) {
                unsigned nr = count > 2046 ? 2046 : count;
                int i;

                BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
                for (i = 0; i < nr; i += 2)
                        OUT_RING (chan, (map[i + 1] << 16) | map[i]);

                count -= nr;
                map += nr;
        }
        return TRUE;
}
536
/* Emit 16-bit indices map[start .. start+count) inline; same scheme as
 * the u08 variant: odd leading index as U32, then pairs packed into
 * VB_ELEMENT_U16 dwords, 2046 indices per packet max.
 */
static INLINE boolean
nv50_draw_elements_inline_u16(struct nv50_context *nv50, uint16_t *map,
                              unsigned start, unsigned count)
{
        struct nouveau_channel *chan = nv50->screen->tesla->channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;

        map += start;

        if (nv50->vbo_fifo)
                return nv50_push_elements_u16(nv50, map, count);

        /* peel one index so the remaining count is even */
        if (count & 1) {
                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
                OUT_RING  (chan, map[0]);
                map++;
                count--;
        }

        while (count) {
                unsigned nr = count > 2046 ? 2046 : count;
                int i;

                BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
                for (i = 0; i < nr; i += 2)
                        OUT_RING (chan, (map[i + 1] << 16) | map[i]);

                count -= nr;
                map += nr;
        }
        return TRUE;
}
569
570 static INLINE boolean
571 nv50_draw_elements_inline_u32(struct nv50_context *nv50, uint32_t *map,
572 unsigned start, unsigned count)
573 {
574 struct nouveau_channel *chan = nv50->screen->tesla->channel;
575 struct nouveau_grobj *tesla = nv50->screen->tesla;
576
577 map += start;
578
579 if (nv50->vbo_fifo)
580 return nv50_push_elements_u32(nv50, map, count);
581
582 while (count) {
583 unsigned nr = count > 2047 ? 2047 : count;
584
585 BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U32, nr);
586 OUT_RINGp (chan, map, nr);
587
588 count -= nr;
589 map += nr;
590 }
591 return TRUE;
592 }
593
594 static INLINE void
595 nv50_draw_elements_inline(struct nv50_context *nv50,
596 void *map, unsigned indexSize,
597 unsigned start, unsigned count)
598 {
599 switch (indexSize) {
600 case 1:
601 nv50_draw_elements_inline_u08(nv50, map, start, count);
602 break;
603 case 2:
604 nv50_draw_elements_inline_u16(nv50, map, start, count);
605 break;
606 case 4:
607 nv50_draw_elements_inline_u32(nv50, map, start, count);
608 break;
609 }
610 }
611
612 void
613 nv50_draw_elements_instanced(struct pipe_context *pipe,
614 struct pipe_buffer *indexBuffer,
615 unsigned indexSize,
616 unsigned mode, unsigned start, unsigned count,
617 unsigned startInstance, unsigned instanceCount)
618 {
619 struct nv50_context *nv50 = nv50_context(pipe);
620 struct nouveau_grobj *tesla = nv50->screen->tesla;
621 struct nouveau_channel *chan = tesla->channel;
622 struct pipe_screen *pscreen = pipe->screen;
623 void *map;
624 unsigned i, nz_divisors;
625 unsigned step[16], pos[16];
626
627 map = pipe_buffer_map(pscreen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ);
628
629 if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
630 nv50_upload_user_vbufs(nv50);
631
632 nv50_state_validate(nv50);
633
634 nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);
635
636 BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
637 OUT_RING (chan, NV50_CB_AUX | (24 << 8));
638 OUT_RING (chan, startInstance);
639
640 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
641 OUT_RING (chan, nv50_prim(mode));
642
643 nv50_draw_elements_inline(nv50, map, indexSize, start, count);
644
645 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
646 OUT_RING (chan, 0);
647
648 for (i = 1; i < instanceCount; ++i) {
649 if (nz_divisors) /* any non-zero array divisors ? */
650 step_per_instance_arrays(nv50, pos, step);
651
652 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
653 OUT_RING (chan, nv50_prim(mode) | (1 << 28));
654
655 nv50_draw_elements_inline(nv50, map, indexSize, start, count);
656
657 BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
658 OUT_RING (chan, 0);
659 }
660 nv50_unmap_vbufs(nv50);
661
662 so_ref(NULL, &nv50->state.instbuf);
663 }
664
/* Draw indexed primitives (non-instanced). 32- and 16-bit indices in
 * GPU-accessible buffers are submitted directly from the index buffer;
 * everything else falls back to CPU-side inline emission.
 */
void
nv50_draw_elements(struct pipe_context *pipe,
                   struct pipe_buffer *indexBuffer, unsigned indexSize,
                   unsigned mode, unsigned start, unsigned count)
{
        struct nv50_context *nv50 = nv50_context(pipe);
        struct nouveau_channel *chan = nv50->screen->tesla->channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct pipe_screen *pscreen = pipe->screen;
        void *map;

        nv50_state_validate(nv50);

        /* NOTE(review): method 0x142c is undocumented here and written
         * twice on purpose — looks like a serialization workaround;
         * confirm before changing. */
        BEGIN_RING(chan, tesla, 0x142c, 1);
        OUT_RING  (chan, 0);
        BEGIN_RING(chan, tesla, 0x142c, 1);
        OUT_RING  (chan, 0);

        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
        OUT_RING  (chan, nv50_prim(mode));

        if (!nv50->vbo_fifo && indexSize == 4) {
                /* submit the index buffer contents as U32 elements */
                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32 | 0x30000, 0);
                OUT_RING  (chan, count);
                nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
                                       start << 2, count << 2);
        } else
        if (!nv50->vbo_fifo && indexSize == 2) {
                /* dword-align the submitted range; U16_SETUP tells the
                 * hardware whether the first index is the high half */
                unsigned vb_start = (start & ~1);
                unsigned vb_end = (start + count + 1) & ~1;
                unsigned dwords = (vb_end - vb_start) >> 1;

                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
                OUT_RING  (chan, ((start & 1) << 31) | count);
                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16 | 0x30000, 0);
                OUT_RING  (chan, dwords);
                nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
                                       vb_start << 1, dwords << 2);
                BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
                OUT_RING  (chan, 0);
        } else {
                /* CPU fallback: map the indices and emit them inline */
                map = pipe_buffer_map(pscreen, indexBuffer,
                                      PIPE_BUFFER_USAGE_CPU_READ);
                nv50_draw_elements_inline(nv50, map, indexSize, start, count);
                nv50_unmap_vbufs(nv50);
                pipe_buffer_unmap(pscreen, indexBuffer);
        }

        BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
        OUT_RING  (chan, 0);
}
716
/* Try to emit vertex element @attrib as a constant attribute, reading
 * its single value from a stride-0 vertex buffer. Builds/extends the
 * state object *pso. Returns FALSE if the buffer cannot be mapped or
 * the component count is unsupported; the caller then keeps the
 * regular array path for this element.
 */
static INLINE boolean
nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
                       struct nouveau_stateobj **pso,
                       struct pipe_vertex_element *ve,
                       struct pipe_vertex_buffer *vb)

{
        struct nouveau_stateobj *so;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nouveau_bo *bo = nouveau_bo(vb->buffer);
        float v[4];
        int ret;

        ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
        if (ret)
                return FALSE;

        /* decode a single vertex into floats */
        util_format_read_4f(ve->src_format, v, 0, (uint8_t *)bo->map +
                            (vb->buffer_offset + ve->src_offset), 0,
                            0, 0, 1, 1);
        so = *pso;
        if (!so)
                *pso = so = so_new(nv50->vtxelt_nr, nv50->vtxelt_nr * 4, 0);

        switch (ve->nr_components) {
        case 4:
                so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
                so_data  (so, fui(v[0]));
                so_data  (so, fui(v[1]));
                so_data  (so, fui(v[2]));
                so_data  (so, fui(v[3]));
                break;
        case 3:
                so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
                so_data  (so, fui(v[0]));
                so_data  (so, fui(v[1]));
                so_data  (so, fui(v[2]));
                break;
        case 2:
                so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
                so_data  (so, fui(v[0]));
                so_data  (so, fui(v[1]));
                break;
        case 1:
                /* a single-component constant may also be the edgeflag */
                if (attrib == nv50->vertprog->cfg.edgeflag_in) {
                        so_method(so, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
                        so_data  (so, v[0] ? 1 : 0);
                }
                so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
                so_data  (so, fui(v[0]));
                break;
        default:
                nouveau_bo_unmap(bo);
                return FALSE;
        }

        nouveau_bo_unmap(bo);
        return TRUE;
}
776
/* Validate vertex array state: build and bind the vertex format
 * (vtxfmt), vertex buffer (vtxbuf) and static attribute (vtxattr)
 * state objects from nv50->vtxelt[] / nv50->vtxbuf[]. Also computes
 * nv50->vbo_fifo, a bitmask of elements that must be pushed through
 * the FIFO instead of fetched as hardware arrays.
 */
void
nv50_vbo_validate(struct nv50_context *nv50)
{
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
        unsigned i, n_ve;

        /* don't validate if Gallium took away our buffers */
        if (nv50->vtxbuf_nr == 0)
                return;
        nv50->vbo_fifo = 0;

        /* strided user (non-VERTEX-usage) buffers cannot be bound as
         * hardware arrays: push everything */
        for (i = 0; i < nv50->vtxbuf_nr; ++i)
                if (nv50->vtxbuf[i].stride &&
                    !(nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX))
                        nv50->vbo_fifo = 0xffff;

        if (NV50_USING_LOATHED_EDGEFLAG(nv50))
                nv50->vbo_fifo = 0xffff; /* vertprog can't set edgeflag */

        /* also cover elements active in the previous state so stale
         * arrays get disabled below */
        n_ve = MAX2(nv50->vtxelt_nr, nv50->state.vtxelt_nr);

        vtxattr = NULL;
        vtxbuf = so_new(n_ve * 2, n_ve * 5, nv50->vtxelt_nr * 4);
        vtxfmt = so_new(1, n_ve, 0);
        so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve);

        for (i = 0; i < nv50->vtxelt_nr; i++) {
                struct pipe_vertex_element *ve = &nv50->vtxelt[i];
                struct pipe_vertex_buffer *vb =
                        &nv50->vtxbuf[ve->vertex_buffer_index];
                struct nouveau_bo *bo = nouveau_bo(vb->buffer);
                uint32_t hw = nv50_vbo_vtxelt_to_hw(ve);

                /* stride 0: emit as a constant attribute, array off */
                if (!vb->stride &&
                    nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
                        so_data(vtxfmt, hw | (1 << 4));

                        so_method(vtxbuf, tesla,
                                  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
                        so_data  (vtxbuf, 0);

                        nv50->vbo_fifo &= ~(1 << i);
                        continue;
                }

                /* FIFO path: array disabled, data pushed at draw time */
                if (nv50->vbo_fifo) {
                        so_data (vtxfmt, hw |
                                 (ve->instance_divisor ? (1 << 4) : i));
                        so_method(vtxbuf, tesla,
                                  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
                        so_data  (vtxbuf, 0);
                        continue;
                }
                so_data(vtxfmt, hw | i);

                /* enable the array; per-instance arrays get stride 0
                 * since they are stepped manually between instances */
                so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
                so_data  (vtxbuf, 0x20000000 |
                          (ve->instance_divisor ? 0 : vb->stride));
                so_reloc (vtxbuf, bo, vb->buffer_offset +
                          ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
                          NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
                so_reloc (vtxbuf, bo, vb->buffer_offset +
                          ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
                          NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);

                /* vertex array limits */
                so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i), 2);
                so_reloc (vtxbuf, bo, vb->buffer->size - 1,
                          NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
                          NOUVEAU_BO_HIGH, 0, 0);
                so_reloc (vtxbuf, bo, vb->buffer->size - 1,
                          NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
                          NOUVEAU_BO_LOW, 0, 0);
        }
        /* disable whatever was active in the previous state */
        for (; i < n_ve; ++i) {
                so_data (vtxfmt, 0x7e080010);

                so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
                so_data  (vtxbuf, 0);
        }
        nv50->state.vtxelt_nr = nv50->vtxelt_nr;

        so_ref (vtxfmt, &nv50->state.vtxfmt);
        so_ref (vtxbuf, &nv50->state.vtxbuf);
        so_ref (vtxattr, &nv50->state.vtxattr);
        so_ref (NULL, &vtxbuf);
        so_ref (NULL, &vtxfmt);
        so_ref (NULL, &vtxattr);
}
867
/* Per-element push callback: writes one vertex's worth of data for a
 * single vertex element into the FIFO. */
typedef void (*pfn_push)(struct nouveau_channel *, void *);

/* Context for pushing vertex data through the FIFO (immediate mode). */
struct nv50_vbo_emitctx
{
        pfn_push push[16];      /* per-element emit function */
        uint8_t *map[16];       /* per-element current source pointer */
        unsigned stride[16];    /* per-element source stride in bytes */
        unsigned nr_ve;         /* number of elements being pushed */
        unsigned vtx_dwords;    /* FIFO dwords emitted per vertex */
        unsigned vtx_max;       /* max vertices per NI packet */

        float edgeflag;         /* last edgeflag value sent */
        unsigned ve_edgeflag;   /* edgeflag attr index; >= 16 if unused */
};
882
883 static INLINE void
884 emit_vtx_next(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit)
885 {
886 unsigned i;
887
888 for (i = 0; i < emit->nr_ve; ++i) {
889 emit->push[i](chan, emit->map[i]);
890 emit->map[i] += emit->stride[i];
891 }
892 }
893
894 static INLINE void
895 emit_vtx(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit,
896 uint32_t vi)
897 {
898 unsigned i;
899
900 for (i = 0; i < emit->nr_ve; ++i)
901 emit->push[i](chan, emit->map[i] + emit->stride[i] * vi);
902 }
903
904 static INLINE boolean
905 nv50_map_vbufs(struct nv50_context *nv50)
906 {
907 int i;
908
909 for (i = 0; i < nv50->vtxbuf_nr; ++i) {
910 struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
911 unsigned size = vb->stride * (vb->max_index + 1) + 16;
912
913 if (nouveau_bo(vb->buffer)->map)
914 continue;
915
916 size = vb->stride * (vb->max_index + 1) + 16;
917 size = MIN2(size, vb->buffer->size);
918 if (!size)
919 size = vb->buffer->size;
920
921 if (nouveau_bo_map_range(nouveau_bo(vb->buffer),
922 0, size, NOUVEAU_BO_RD))
923 break;
924 }
925
926 if (i == nv50->vtxbuf_nr)
927 return TRUE;
928 for (; i >= 0; --i)
929 nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
930 return FALSE;
931 }
932
/* Push one 32-bit component as one dword. */
static void
emit_b32_1(struct nouveau_channel *chan, void *data)
{
        uint32_t *v = data;

        OUT_RING(chan, v[0]);
}
940
/* Push two 32-bit components as two dwords. */
static void
emit_b32_2(struct nouveau_channel *chan, void *data)
{
        uint32_t *v = data;

        OUT_RING(chan, v[0]);
        OUT_RING(chan, v[1]);
}
949
/* Push three 32-bit components as three dwords. */
static void
emit_b32_3(struct nouveau_channel *chan, void *data)
{
        uint32_t *v = data;

        OUT_RING(chan, v[0]);
        OUT_RING(chan, v[1]);
        OUT_RING(chan, v[2]);
}
959
/* Push four 32-bit components as four dwords. */
static void
emit_b32_4(struct nouveau_channel *chan, void *data)
{
        uint32_t *v = data;

        OUT_RING(chan, v[0]);
        OUT_RING(chan, v[1]);
        OUT_RING(chan, v[2]);
        OUT_RING(chan, v[3]);
}
970
/* Push one 16-bit component, zero-extended into one dword. */
static void
emit_b16_1(struct nouveau_channel *chan, void *data)
{
        uint16_t *v = data;

        OUT_RING(chan, v[0]);
}
978
/* Push three 16-bit components packed into two dwords. */
static void
emit_b16_3(struct nouveau_channel *chan, void *data)
{
        uint16_t *v = data;

        OUT_RING(chan, (v[1] << 16) | v[0]);
        OUT_RING(chan, v[2]);
}
987
/* Push one 8-bit component, zero-extended into one dword. */
static void
emit_b08_1(struct nouveau_channel *chan, void *data)
{
        uint8_t *v = data;

        OUT_RING(chan, v[0]);
}
995
/* Push three 8-bit components packed into one dword. */
static void
emit_b08_3(struct nouveau_channel *chan, void *data)
{
        uint8_t *v = data;

        OUT_RING(chan, (v[2] << 16) | (v[1] << 8) | v[0]);
}
1003
1004 static boolean
1005 emit_prepare(struct nv50_context *nv50, struct nv50_vbo_emitctx *emit,
1006 unsigned start)
1007 {
1008 unsigned i;
1009
1010 if (nv50_map_vbufs(nv50) == FALSE)
1011 return FALSE;
1012
1013 emit->ve_edgeflag = nv50->vertprog->cfg.edgeflag_in;
1014
1015 emit->edgeflag = 0.5f;
1016 emit->nr_ve = 0;
1017 emit->vtx_dwords = 0;
1018
1019 for (i = 0; i < nv50->vtxelt_nr; ++i) {
1020 struct pipe_vertex_element *ve;
1021 struct pipe_vertex_buffer *vb;
1022 unsigned n, size;
1023 const struct util_format_description *desc;
1024
1025 ve = &nv50->vtxelt[i];
1026 vb = &nv50->vtxbuf[ve->vertex_buffer_index];
1027 if (!(nv50->vbo_fifo & (1 << i)) || ve->instance_divisor)
1028 continue;
1029 n = emit->nr_ve++;
1030
1031 emit->stride[n] = vb->stride;
1032 emit->map[n] = (uint8_t *)nouveau_bo(vb->buffer)->map +
1033 vb->buffer_offset +
1034 (start * vb->stride + ve->src_offset);
1035
1036 desc = util_format_description(ve->src_format);
1037 assert(desc);
1038
1039 size = util_format_get_component_bits(
1040 ve->src_format, UTIL_FORMAT_COLORSPACE_RGB, 0);
1041
1042 assert(ve->nr_components > 0 && ve->nr_components <= 4);
1043
1044 /* It shouldn't be necessary to push the implicit 1s
1045 * for case 3 and size 8 cases 1, 2, 3.
1046 */
1047 switch (size) {
1048 default:
1049 NOUVEAU_ERR("unsupported vtxelt size: %u\n", size);
1050 return FALSE;
1051 case 32:
1052 switch (ve->nr_components) {
1053 case 1: emit->push[n] = emit_b32_1; break;
1054 case 2: emit->push[n] = emit_b32_2; break;
1055 case 3: emit->push[n] = emit_b32_3; break;
1056 case 4: emit->push[n] = emit_b32_4; break;
1057 }
1058 emit->vtx_dwords += ve->nr_components;
1059 break;
1060 case 16:
1061 switch (ve->nr_components) {
1062 case 1: emit->push[n] = emit_b16_1; break;
1063 case 2: emit->push[n] = emit_b32_1; break;
1064 case 3: emit->push[n] = emit_b16_3; break;
1065 case 4: emit->push[n] = emit_b32_2; break;
1066 }
1067 emit->vtx_dwords += (ve->nr_components + 1) >> 1;
1068 break;
1069 case 8:
1070 switch (ve->nr_components) {
1071 case 1: emit->push[n] = emit_b08_1; break;
1072 case 2: emit->push[n] = emit_b16_1; break;
1073 case 3: emit->push[n] = emit_b08_3; break;
1074 case 4: emit->push[n] = emit_b32_1; break;
1075 }
1076 emit->vtx_dwords += 1;
1077 break;
1078 }
1079 }
1080
1081 emit->vtx_max = 512 / emit->vtx_dwords;
1082 if (emit->ve_edgeflag < 16)
1083 emit->vtx_max = 1;
1084
1085 return TRUE;
1086 }
1087
1088 static INLINE void
1089 set_edgeflag(struct nouveau_channel *chan,
1090 struct nouveau_grobj *tesla,
1091 struct nv50_vbo_emitctx *emit, uint32_t index)
1092 {
1093 unsigned i = emit->ve_edgeflag;
1094
1095 if (i < 16) {
1096 float f = *((float *)(emit->map[i] + index * emit->stride[i]));
1097
1098 if (emit->edgeflag != f) {
1099 emit->edgeflag = f;
1100
1101 BEGIN_RING(chan, tesla, 0x15e4, 1);
1102 OUT_RING (chan, f ? 1 : 0);
1103 }
1104 }
1105 }
1106
/* Push non-indexed vertex data [start, start+count) through the FIFO
 * in batches of at most emit.vtx_max vertices.
 */
static boolean
nv50_push_arrays(struct nv50_context *nv50, unsigned start, unsigned count)
{
        struct nouveau_channel *chan = nv50->screen->base.channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nv50_vbo_emitctx emit;

        if (emit_prepare(nv50, &emit, start) == FALSE)
                return FALSE;

        while (count) {
                unsigned i, dw, nr = MIN2(count, emit.vtx_max);
                dw = nr * emit.vtx_dwords;

                /* with an edgeflag element vtx_max is 1, so checking
                 * vertex 0 of the batch covers every vertex */
                set_edgeflag(chan, tesla, &emit, 0); /* nr will be 1 */

                BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
                for (i = 0; i < nr; ++i)
                        emit_vtx_next(chan, &emit);

                count -= nr;
        }

        return TRUE;
}
1132
/* Push vertices selected by 32-bit indices through the FIFO.
 * Note the edgeflag check only samples the first index of each batch;
 * with an edgeflag element batches are a single vertex anyway.
 */
static boolean
nv50_push_elements_u32(struct nv50_context *nv50, uint32_t *map, unsigned count)
{
        struct nouveau_channel *chan = nv50->screen->base.channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nv50_vbo_emitctx emit;

        if (emit_prepare(nv50, &emit, 0) == FALSE)
                return FALSE;

        while (count) {
                unsigned i, dw, nr = MIN2(count, emit.vtx_max);
                dw = nr * emit.vtx_dwords;

                set_edgeflag(chan, tesla, &emit, *map);

                BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
                for (i = 0; i < nr; ++i)
                        emit_vtx(chan, &emit, *map++);

                count -= nr;
        }

        return TRUE;
}
1158
/* Push vertices selected by 16-bit indices through the FIFO.
 * Same scheme as the u32 variant; the edgeflag check samples the first
 * index of each batch (batches are 1 vertex when edgeflag is active).
 */
static boolean
nv50_push_elements_u16(struct nv50_context *nv50, uint16_t *map, unsigned count)
{
        struct nouveau_channel *chan = nv50->screen->base.channel;
        struct nouveau_grobj *tesla = nv50->screen->tesla;
        struct nv50_vbo_emitctx emit;

        if (emit_prepare(nv50, &emit, 0) == FALSE)
                return FALSE;

        while (count) {
                unsigned i, dw, nr = MIN2(count, emit.vtx_max);
                dw = nr * emit.vtx_dwords;

                set_edgeflag(chan, tesla, &emit, *map);

                BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
                for (i = 0; i < nr; ++i)
                        emit_vtx(chan, &emit, *map++);

                count -= nr;
        }

        return TRUE;
}
1184
1185 static boolean
1186 nv50_push_elements_u08(struct nv50_context *nv50, uint8_t *map, unsigned count)
1187 {
1188 struct nouveau_channel *chan = nv50->screen->base.channel;
1189 struct nouveau_grobj *tesla = nv50->screen->tesla;
1190 struct nv50_vbo_emitctx emit;
1191
1192 if (emit_prepare(nv50, &emit, 0) == FALSE)
1193 return FALSE;
1194
1195 while (count) {
1196 unsigned i, dw, nr = MIN2(count, emit.vtx_max);
1197 dw = nr * emit.vtx_dwords;
1198
1199 set_edgeflag(chan, tesla, &emit, *map);
1200
1201 BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
1202 for (i = 0; i < nr; ++i)
1203 emit_vtx(chan, &emit, *map++);
1204
1205 count -= nr;
1206 }
1207
1208 return TRUE;
1209 }