/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Build post-transformation, post-clipping vertex buffers and element
 * lists by hooking into the end of the primitive pipeline and
 * manipulating the vertex_id field in the vertex headers.
 *
 * XXX: work in progress
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 */

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_fifo.h"

#include "i915_context.h"
#include "i915_reg.h"
#include "i915_batch.h"
#include "i915_state.h"

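/*
 * When VBUF_MAP_BUFFER is defined, vertices are written straight into a
 * mapping of the winsys buffer; otherwise they are staged in a malloc'd
 * shadow buffer and uploaded with buffer_write at unmap time.
 */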
#define VBUF_MAP_BUFFER

/**
 * Primitive renderer for i915.
 */
struct i915_vbuf_render {
   struct vbuf_render base;

   struct i915_context *i915;

   /** Vertex size in bytes */
   size_t vertex_size;

   /** Software primitive */
   unsigned prim;

   /** Hardware primitive */
   unsigned hwprim;

   /** Fallback primitive (0 if none); indices are generated in software */
   unsigned fallback;

   /* Stuff for the vbo */
   struct i915_winsys_buffer *vbo;
   size_t vbo_size; /**< current size of allocated buffer */
   size_t vbo_alloc_size; /**< minimum buffer size to allocate */
   size_t vbo_hw_offset; /**< offset that we program the hardware with */
   size_t vbo_sw_offset; /**< offset that we work with */
   size_t vbo_index; /**< index offset to be added to all indices */
   void *vbo_ptr;
   size_t vbo_max_used;
   size_t vbo_max_index; /**< highest index in the most recently unmapped vertex range */

#ifndef VBUF_MAP_BUFFER
   size_t map_used_start;
   size_t map_used_end;
   size_t map_size;
#endif
};


/**
 * Basically a cast wrapper.
 */
static INLINE struct i915_vbuf_render *
i915_vbuf_render(struct vbuf_render *render)
{
   assert(render);
   return (struct i915_vbuf_render *)render;
}

/**
 * If the vbo state differs between the renderer and the context,
 * push it to the context. This function pushes
 * hw_offset to i915->vbo_offset and vbo to i915->vbo.
 *
 * Side effects:
 *    May update the context vbo_offset and vbo fields.
 */
static void
i915_vbuf_update_vbo_state(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915->vbo != i915_render->vbo ||
       i915->vbo_offset != i915_render->vbo_hw_offset) {
      i915->vbo = i915_render->vbo;
      i915->vbo_offset = i915_render->vbo_hw_offset;
      i915->dirty |= I915_NEW_VBO;
   }
}

/**
 * Callback exported to the draw module.
 * Returns the current vertex_info.
 *
 * Side effects:
 *    If state is dirty update derived state.
 */
static const struct vertex_info *
i915_vbuf_render_get_vertex_info(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915->dirty) {
      /* make sure we have up to date vertex layout */
      i915_update_derived(i915);
   }

   return &i915->current.vertex_info;
}

/**
 * Reserve space in the vbo for vertices.
 *
 * Side effects:
 *    None.
 */
static boolean
i915_vbuf_render_reserve(struct i915_vbuf_render *i915_render, size_t size)
{
   struct i915_context *i915 = i915_render->i915;

   if (i915_render->vbo_size < size + i915_render->vbo_sw_offset)
      return FALSE;

   if (i915->vbo_flushed)
      return FALSE;

   return TRUE;
}

/**
 * Allocate a new vbo buffer if there is not enough space for the
 * number of vertices requested by the draw module.
 *
 * Side effects:
 *    Updates hw_offset, sw_offset, index and allocates a new buffer.
 */
static void
i915_vbuf_render_new_buf(struct i915_vbuf_render *i915_render, size_t size)
{
   struct i915_context *i915 = i915_render->i915;
   struct i915_winsys *iws = i915->iws;

   if (i915_render->vbo)
      iws->buffer_destroy(iws, i915_render->vbo);

   i915->vbo_flushed = 0;

   i915_render->vbo_size = MAX2(size, i915_render->vbo_alloc_size);
   i915_render->vbo_hw_offset = 0;
   i915_render->vbo_sw_offset = 0;
   i915_render->vbo_index = 0;

#ifndef VBUF_MAP_BUFFER
   if (i915_render->vbo_size > i915_render->map_size) {
      i915_render->map_size = i915_render->vbo_size;
      FREE(i915_render->vbo_ptr);
      i915_render->vbo_ptr = MALLOC(i915_render->map_size);
   }
#endif

   i915_render->vbo = iws->buffer_create(iws, i915_render->vbo_size,
                                         I915_NEW_VERTEX);
}

/**
 * Callback exported to the draw module.
 *
 * Side effects:
 *    Updates hw_offset, sw_offset, index and may allocate
 *    a new buffer. May also update the vbo state
 *    on the i915 context.
 */
static boolean
i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
                                   ushort vertex_size,
                                   ushort nr_vertices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   size_t size = (size_t)vertex_size * (size_t)nr_vertices;
   size_t offset;

   /*
    * Align sw_offset with the first multiple of vertex size from hw_offset.
    * Set index to the number of vertex-size steps from hw_offset to
    * sw_offset. This stays correct even if i915_vbuf_render_new_buf
    * allocates a new buffer, since it resets index, sw_offset and hw_offset.
    */
   {
      offset = i915_render->vbo_sw_offset - i915_render->vbo_hw_offset;
      offset = util_align_npot(offset, vertex_size);
      i915_render->vbo_sw_offset = i915_render->vbo_hw_offset + offset;
      i915_render->vbo_index = offset / vertex_size;
   }

   if (!i915_vbuf_render_reserve(i915_render, size))
      i915_vbuf_render_new_buf(i915_render, size);

   /*
    * If a new buffer has been allocated, sw_offset,
    * hw_offset & index have been reset by new_buf.
    */

   i915_render->vertex_size = vertex_size;

   i915_vbuf_update_vbo_state(render);

   if (!i915_render->vbo)
      return FALSE;
   return TRUE;
}

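/**
 * Callback exported to the draw module.
 *
 * Returns a pointer at which the draw module can write vertices. With
 * VBUF_MAP_BUFFER this maps the vbo and points at sw_offset within it;
 * otherwise it returns the malloc'd shadow buffer.
 */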
static void *
i915_vbuf_render_map_vertices(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   struct i915_winsys *iws = i915->iws;

   if (i915->vbo_flushed)
      debug_printf("%s bad vbo flush occurred stalling on hw\n", __FUNCTION__);

#ifdef VBUF_MAP_BUFFER
   i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, TRUE);
   return (unsigned char *)i915_render->vbo_ptr + i915_render->vbo_sw_offset;
#else
   (void)iws;
   return (unsigned char *)i915_render->vbo_ptr;
#endif
}

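/**
 * Callback exported to the draw module.
 *
 * Records the highest index written and how many bytes of the vbo were
 * used. With VBUF_MAP_BUFFER the vbo is simply unmapped; otherwise the
 * used range of the shadow buffer is uploaded with buffer_write.
 */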
static void
i915_vbuf_render_unmap_vertices(struct vbuf_render *render,
                                ushort min_index,
                                ushort max_index)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   struct i915_winsys *iws = i915->iws;

   i915_render->vbo_max_index = max_index;
   i915_render->vbo_max_used = MAX2(i915_render->vbo_max_used,
                                    i915_render->vertex_size * (max_index + 1));
#ifdef VBUF_MAP_BUFFER
   iws->buffer_unmap(iws, i915_render->vbo);
#else
   i915_render->map_used_start = i915_render->vertex_size * min_index;
   i915_render->map_used_end = i915_render->vertex_size * (max_index + 1);
   iws->buffer_write(iws, i915_render->vbo,
                     i915_render->map_used_start + i915_render->vbo_sw_offset,
                     i915_render->map_used_end - i915_render->map_used_start,
                     (unsigned char *)i915_render->vbo_ptr + i915_render->map_used_start);
#endif
}

/**
 * Ensure that the given max_index plus the current index offset does not
 * overflow the range addressable by the hardware indices. If it does,
 * advance hw_offset to the same position in the vbo as sw_offset and
 * reset index to zero.
 *
 * Side effects:
 *    On overflow, updates hw_offset and index.
 */
static void
i915_vbuf_ensure_index_bounds(struct vbuf_render *render,
                              unsigned max_index)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   if (max_index + i915_render->vbo_index < ((1 << 17) - 1))
      return;

   i915_render->vbo_hw_offset = i915_render->vbo_sw_offset;
   i915_render->vbo_index = 0;

   i915_vbuf_update_vbo_state(render);
}

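/**
 * Callback exported to the draw module.
 *
 * Maps a pipe primitive type to a hardware PRIM3D_* type. Primitives the
 * hardware cannot draw directly (line loops, quads, quad strips) are
 * marked for the index-generating fallback path below.
 */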
static boolean
i915_vbuf_render_set_primitive(struct vbuf_render *render,
                               unsigned prim)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   i915_render->prim = prim;

   switch (prim) {
   case PIPE_PRIM_POINTS:
      i915_render->hwprim = PRIM3D_POINTLIST;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_LINES:
      i915_render->hwprim = PRIM3D_LINELIST;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_LINE_LOOP:
      i915_render->hwprim = PRIM3D_LINELIST;
      i915_render->fallback = PIPE_PRIM_LINE_LOOP;
      return TRUE;
   case PIPE_PRIM_LINE_STRIP:
      i915_render->hwprim = PRIM3D_LINESTRIP;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_TRIANGLES:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_TRIANGLE_STRIP:
      i915_render->hwprim = PRIM3D_TRISTRIP;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_TRIANGLE_FAN:
      i915_render->hwprim = PRIM3D_TRIFAN;
      i915_render->fallback = 0;
      return TRUE;
   case PIPE_PRIM_QUADS:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = PIPE_PRIM_QUADS;
      return TRUE;
   case PIPE_PRIM_QUAD_STRIP:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = PIPE_PRIM_QUAD_STRIP;
      return TRUE;
   case PIPE_PRIM_POLYGON:
      i915_render->hwprim = PRIM3D_POLY;
      i915_render->fallback = 0;
      return TRUE;
   default:
      /* FIXME: Actually, can handle a lot more just fine... */
      return FALSE;
   }
}

/**
 * Used for fallbacks in draw_arrays.
 */
static void
draw_arrays_generate_indices(struct vbuf_render *render,
                             unsigned start, uint nr,
                             unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned end = start + nr + i915_render->vbo_index;
   start += i915_render->vbo_index;

   switch (type) {
   case 0:
      for (i = start; i+1 < end; i += 2)
         OUT_BATCH((i+0) | (i+1) << 16);
      if (i < end)
         OUT_BATCH(i);
      break;
   case PIPE_PRIM_LINE_LOOP:
      if (nr >= 2) {
         for (i = start + 1; i < end; i++)
            OUT_BATCH((i-1) | (i+0) << 16);
         OUT_BATCH((i-1) | ( start) << 16);
      }
      break;
   case PIPE_PRIM_QUADS:
      for (i = start; i + 3 < end; i += 4) {
         OUT_BATCH((i+0) | (i+1) << 16);
         OUT_BATCH((i+3) | (i+1) << 16);
         OUT_BATCH((i+2) | (i+3) << 16);
      }
      break;
   case PIPE_PRIM_QUAD_STRIP:
      for (i = start; i + 3 < end; i += 2) {
         OUT_BATCH((i+0) | (i+1) << 16);
         OUT_BATCH((i+3) | (i+2) << 16);
         OUT_BATCH((i+0) | (i+3) << 16);
      }
      break;
   default:
      assert(0);
   }
}

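/**
 * Returns how many indices the fallback path will emit for nr vertices
 * of the given primitive type (0 means no fallback, one index per vertex).
 */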
static unsigned
draw_arrays_calc_nr_indices(uint nr, unsigned type)
{
   switch (type) {
   case 0:
      return nr;
   case PIPE_PRIM_LINE_LOOP:
      if (nr >= 2)
         return nr * 2;
      else
         return 0;
   case PIPE_PRIM_QUADS:
      return (nr / 4) * 6;
   case PIPE_PRIM_QUAD_STRIP:
      return ((nr - 2) / 2) * 6;
   default:
      assert(0);
      return 0;
   }
}

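/**
 * draw_arrays path for primitives the hardware cannot draw directly:
 * emits an indexed _3DPRIMITIVE and generates the index list in software.
 */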
static void
draw_arrays_fallback(struct vbuf_render *render,
                     unsigned start,
                     uint nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned nr_indices;

   nr_indices = draw_arrays_calc_nr_indices(nr, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, start + nr_indices);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush:
       */
      i915_update_derived(i915);
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE |
             PRIM_INDIRECT |
             i915_render->hwprim |
             PRIM_INDIRECT_ELTS |
             nr_indices);

   draw_arrays_generate_indices(render, start, nr, i915_render->fallback);

out:
   return;
}

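/**
 * Callback exported to the draw module.
 *
 * Emits a sequential (non-indexed) _3DPRIMITIVE for the current hardware
 * primitive, or hands off to draw_arrays_fallback when indices have to be
 * generated in software.
 */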
static void
i915_vbuf_render_draw_arrays(struct vbuf_render *render,
                             unsigned start,
                             uint nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915_render->fallback) {
      draw_arrays_fallback(render, start, nr);
      return;
   }

   i915_vbuf_ensure_index_bounds(render, start + nr);
   start += i915_render->vbo_index;

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   if (!BEGIN_BATCH(2, 0)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush:
       */
      i915_update_derived(i915);
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(2, 0)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE |
             PRIM_INDIRECT |
             PRIM_INDIRECT_SEQUENTIAL |
             i915_render->hwprim |
             nr);
   OUT_BATCH(start); /* Beginning vertex index */

out:
   return;
}

/**
 * Used for normal and fallback emitting of indices.
 * If type is zero, normal operation is assumed.
 */
static void
draw_generate_indices(struct vbuf_render *render,
                      const ushort *indices,
                      uint nr_indices,
                      unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned o = i915_render->vbo_index;

   switch (type) {
   case 0:
      for (i = 0; i + 1 < nr_indices; i += 2) {
         OUT_BATCH((o+indices[i]) | (o+indices[i+1]) << 16);
      }
      if (i < nr_indices) {
         OUT_BATCH((o+indices[i]));
      }
      break;
   case PIPE_PRIM_LINE_LOOP:
      if (nr_indices >= 2) {
         for (i = 1; i < nr_indices; i++)
            OUT_BATCH((o+indices[i-1]) | (o+indices[i]) << 16);
         OUT_BATCH((o+indices[i-1]) | (o+indices[0]) << 16);
      }
      break;
   case PIPE_PRIM_QUADS:
      for (i = 0; i + 3 < nr_indices; i += 4) {
         OUT_BATCH((o+indices[i+0]) | (o+indices[i+1]) << 16);
         OUT_BATCH((o+indices[i+3]) | (o+indices[i+1]) << 16);
         OUT_BATCH((o+indices[i+2]) | (o+indices[i+3]) << 16);
      }
      break;
   case PIPE_PRIM_QUAD_STRIP:
      for (i = 0; i + 3 < nr_indices; i += 2) {
         OUT_BATCH((o+indices[i+0]) | (o+indices[i+1]) << 16);
         OUT_BATCH((o+indices[i+3]) | (o+indices[i+2]) << 16);
         OUT_BATCH((o+indices[i+0]) | (o+indices[i+3]) << 16);
      }
      break;
   default:
      assert(0);
      break;
   }
}

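/**
 * Returns how many indices will actually be emitted for nr_indices input
 * indices of the given primitive type (0 means no fallback, pass-through).
 */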
static unsigned
draw_calc_nr_indices(uint nr_indices, unsigned type)
{
   switch (type) {
   case 0:
      return nr_indices;
   case PIPE_PRIM_LINE_LOOP:
      if (nr_indices >= 2)
         return nr_indices * 2;
      else
         return 0;
   case PIPE_PRIM_QUADS:
      return (nr_indices / 4) * 6;
   case PIPE_PRIM_QUAD_STRIP:
      return ((nr_indices - 2) / 2) * 6;
   default:
      assert(0);
      return 0;
   }
}

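/**
 * Callback exported to the draw module.
 *
 * Emits an indexed _3DPRIMITIVE, packing two ushort indices per dword and
 * translating fallback primitives into an equivalent triangle/line index
 * list via draw_generate_indices.
 */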
static void
i915_vbuf_render_draw_elements(struct vbuf_render *render,
                               const ushort *indices,
                               uint nr_indices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned save_nr_indices;

   save_nr_indices = nr_indices;

   nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, i915_render->vbo_max_index);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush:
       */
      i915_update_derived(i915);
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE |
             PRIM_INDIRECT |
             i915_render->hwprim |
             PRIM_INDIRECT_ELTS |
             nr_indices);
   draw_generate_indices(render,
                         indices,
                         save_nr_indices,
                         i915_render->fallback);

out:
   return;
}

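/**
 * Callback exported to the draw module.
 *
 * Advances sw_offset past the vertices just used so the buffer space can
 * be reused for the next batch of vertices without reallocating.
 */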
static void
i915_vbuf_render_release_vertices(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   i915_render->vbo_sw_offset += i915_render->vbo_max_used;
   i915_render->vbo_max_used = 0;

   /*
    * Micro optimization: by calling update here the offset change
    * will be picked up on the next pipe_context::draw_*.
    */
   i915_vbuf_update_vbo_state(render);
}

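/**
 * Callback exported to the draw module; frees the renderer itself.
 */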
static void
i915_vbuf_render_destroy(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   FREE(i915_render);
}

/**
 * Create a new primitive render.
 */
static struct vbuf_render *
i915_vbuf_render_create(struct i915_context *i915)
{
   struct i915_vbuf_render *i915_render = CALLOC_STRUCT(i915_vbuf_render);
   struct i915_winsys *iws = i915->iws;
   int i;

   i915_render->i915 = i915;

   i915_render->base.max_vertex_buffer_bytes = 16*4096;

   /* NOTE: it must be such that state and vertex indices fit in a single
    * batch buffer.
    */
   i915_render->base.max_indices = 16*1024;

   i915_render->base.get_vertex_info = i915_vbuf_render_get_vertex_info;
   i915_render->base.allocate_vertices = i915_vbuf_render_allocate_vertices;
   i915_render->base.map_vertices = i915_vbuf_render_map_vertices;
   i915_render->base.unmap_vertices = i915_vbuf_render_unmap_vertices;
   i915_render->base.set_primitive = i915_vbuf_render_set_primitive;
   i915_render->base.draw_elements = i915_vbuf_render_draw_elements;
   i915_render->base.draw_arrays = i915_vbuf_render_draw_arrays;
   i915_render->base.release_vertices = i915_vbuf_render_release_vertices;
   i915_render->base.destroy = i915_vbuf_render_destroy;

#ifndef VBUF_MAP_BUFFER
   i915_render->map_size = 0;
   i915_render->map_used_start = 0;
   i915_render->map_used_end = 0;
#endif

   i915_render->vbo = NULL;
   i915_render->vbo_ptr = NULL;
   i915_render->vbo_size = 0;
   i915_render->vbo_hw_offset = 0;
   i915_render->vbo_sw_offset = 0;
   i915_render->vbo_alloc_size = i915_render->base.max_vertex_buffer_bytes * 4;

#ifdef VBUF_USE_POOL
   i915_render->pool_used = FALSE;
   i915_render->pool_buffer_size = i915_render->vbo_alloc_size;
   i915_render->pool_fifo = u_fifo_create(6);
   for (i = 0; i < 6; i++)
      u_fifo_add(i915_render->pool_fifo,
                 iws->buffer_create(iws, i915_render->pool_buffer_size,
                                    I915_NEW_VERTEX));
#else
   (void)i;
   (void)iws;
#endif

   return &i915_render->base;
}

/**
 * Create a new primitive vbuf/render stage.
 */
struct draw_stage *i915_draw_vbuf_stage(struct i915_context *i915)
{
   struct vbuf_render *render;
   struct draw_stage *stage;

   render = i915_vbuf_render_create(i915);
   if (!render)
      return NULL;

   stage = draw_vbuf_stage(i915->draw, render);
   if (!stage) {
      render->destroy(render);
      return NULL;
   }
   /** TODO JB: this shouldn't be here */
   draw_set_render(i915->draw, render);

   return stage;
}