1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
28 #include "util/u_format.h"
29 #include "util/u_inlines.h"
30 #include "util/u_prim.h"
31 #include "util/u_time.h"
32 #include "indices/u_indices.h"
34 #include "svga_hw_reg.h"
35 #include "svga_context.h"
36 #include "svga_screen.h"
37 #include "svga_draw.h"
38 #include "svga_state.h"
39 #include "svga_swtnl.h"
40 #include "svga_debug.h"
41 #include "svga_resource_buffer.h"
42 #include "util/u_upload_mgr.h"
45 * Determine the ranges to upload for the user-buffers referenced
46 * by the next draw command.
48 * TODO: It might be beneficial to support multiple ranges. In that case,
49 * the struct svga_buffer::uploaded member should be made an array or a
50 * list, since we need to account for the possibility that different ranges
51 * may be uploaded to different hardware buffers chosen by the utility
56 svga_user_buffer_range(struct svga_context
*svga
,
59 unsigned instance_count
)
61 const struct pipe_vertex_element
*ve
= svga
->curr
.velems
->velem
;
65 * Release old uploaded range (if not done already) and
66 * initialize new ranges.
69 for (i
=0; i
< svga
->curr
.velems
->count
; i
++) {
70 struct pipe_vertex_buffer
*vb
=
71 &svga
->curr
.vb
[ve
[i
].vertex_buffer_index
];
73 if (vb
->buffer
&& svga_buffer_is_user_buffer(vb
->buffer
)) {
74 struct svga_buffer
*buffer
= svga_buffer(vb
->buffer
);
76 pipe_resource_reference(&buffer
->uploaded
.buffer
, NULL
);
77 buffer
->uploaded
.start
= ~0;
78 buffer
->uploaded
.end
= 0;
82 for (i
=0; i
< svga
->curr
.velems
->count
; i
++) {
83 struct pipe_vertex_buffer
*vb
=
84 &svga
->curr
.vb
[ve
[i
].vertex_buffer_index
];
86 if (vb
->buffer
&& svga_buffer_is_user_buffer(vb
->buffer
)) {
87 struct svga_buffer
*buffer
= svga_buffer(vb
->buffer
);
89 unsigned instance_div
= ve
[i
].instance_divisor
;
90 unsigned elemSize
= util_format_get_blocksize(ve
[i
].src_format
);
92 svga
->dirty
|= SVGA_NEW_VBUFFER
;
95 first
= ve
[i
].src_offset
;
96 count
= (instance_count
+ instance_div
- 1) / instance_div
;
97 size
= vb
->stride
* (count
- 1) + elemSize
;
98 } else if (vb
->stride
) {
99 first
= vb
->stride
* start
+ ve
[i
].src_offset
;
100 size
= vb
->stride
* (count
- 1) + elemSize
;
102 /* Only a single vertex!
103 * Upload with the largest vertex size the hw supports,
106 first
= ve
[i
].src_offset
;
107 size
= MIN2(16, vb
->buffer
->width0
);
110 buffer
->uploaded
.start
= MIN2(buffer
->uploaded
.start
, first
);
111 buffer
->uploaded
.end
= MAX2(buffer
->uploaded
.end
, first
+ size
);
117 * svga_upload_user_buffers - upload parts of user buffers
119 * This function streams a part of a user buffer to hw and fills
120 * svga_buffer::uploaded with information on the upload.
124 svga_upload_user_buffers(struct svga_context
*svga
,
127 unsigned instance_count
)
129 const struct pipe_vertex_element
*ve
= svga
->curr
.velems
->velem
;
133 svga_user_buffer_range(svga
, start
, count
, instance_count
);
135 for (i
=0; i
< svga
->curr
.velems
->count
; i
++) {
136 struct pipe_vertex_buffer
*vb
=
137 &svga
->curr
.vb
[ve
[i
].vertex_buffer_index
];
139 if (vb
->buffer
&& svga_buffer_is_user_buffer(vb
->buffer
)) {
140 struct svga_buffer
*buffer
= svga_buffer(vb
->buffer
);
143 * Check if already uploaded. Otherwise go ahead and upload.
146 if (buffer
->uploaded
.buffer
)
149 ret
= u_upload_buffer( svga
->upload_vb
,
151 buffer
->uploaded
.start
,
152 buffer
->uploaded
.end
- buffer
->uploaded
.start
,
154 &buffer
->uploaded
.offset
,
155 &buffer
->uploaded
.buffer
);
161 debug_printf("%s: %d: orig buf %p upl buf %p ofs %d sofs %d"
166 buffer
->uploaded
.buffer
,
167 buffer
->uploaded
.offset
,
168 buffer
->uploaded
.start
,
169 buffer
->uploaded
.end
- buffer
->uploaded
.start
);
171 vb
->buffer_offset
= buffer
->uploaded
.offset
;
179 * svga_release_user_upl_buffers - release uploaded parts of user buffers
181 * This function releases the hw copy of the uploaded fraction of the
182 * user-buffer. It's important to do this as soon as all draw calls
183 * affecting the uploaded fraction are issued, as this allows for
184 * efficient reuse of the hardware surface backing the uploaded fraction.
186 * svga_buffer::source_offset is set to 0, and svga_buffer::uploaded::buffer
191 svga_release_user_upl_buffers(struct svga_context
*svga
)
196 nr
= svga
->curr
.num_vertex_buffers
;
198 for (i
= 0; i
< nr
; ++i
) {
199 struct pipe_vertex_buffer
*vb
= &svga
->curr
.vb
[i
];
201 if (vb
->buffer
&& svga_buffer_is_user_buffer(vb
->buffer
)) {
202 struct svga_buffer
*buffer
= svga_buffer(vb
->buffer
);
204 buffer
->uploaded
.start
= ~0;
205 buffer
->uploaded
.end
= 0;
206 if (buffer
->uploaded
.buffer
)
207 pipe_resource_reference(&buffer
->uploaded
.buffer
, NULL
);
214 static enum pipe_error
215 retry_draw_range_elements( struct svga_context
*svga
,
216 struct pipe_resource
*index_buffer
,
224 unsigned instance_count
,
227 enum pipe_error ret
= PIPE_OK
;
229 svga_hwtnl_set_unfilled( svga
->hwtnl
,
230 svga
->curr
.rast
->hw_unfilled
);
232 svga_hwtnl_set_flatshade( svga
->hwtnl
,
233 svga
->curr
.rast
->templ
.flatshade
,
234 svga
->curr
.rast
->templ
.flatshade_first
);
236 ret
= svga_upload_user_buffers( svga
, min_index
+ index_bias
,
237 max_index
- min_index
+ 1, instance_count
);
241 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
245 ret
= svga_hwtnl_draw_range_elements( svga
->hwtnl
,
246 index_buffer
, index_size
, index_bias
,
247 min_index
, max_index
,
248 prim
, start
, count
);
255 svga_context_flush( svga
, NULL
);
259 return retry_draw_range_elements( svga
,
260 index_buffer
, index_size
, index_bias
,
261 min_index
, max_index
,
263 instance_count
, FALSE
);
270 static enum pipe_error
271 retry_draw_arrays( struct svga_context
*svga
,
275 unsigned instance_count
,
280 svga_hwtnl_set_unfilled( svga
->hwtnl
,
281 svga
->curr
.rast
->hw_unfilled
);
283 svga_hwtnl_set_flatshade( svga
->hwtnl
,
284 svga
->curr
.rast
->templ
.flatshade
,
285 svga
->curr
.rast
->templ
.flatshade_first
);
287 ret
= svga_upload_user_buffers( svga
, start
, count
, instance_count
);
292 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
296 ret
= svga_hwtnl_draw_arrays( svga
->hwtnl
, prim
,
304 if (ret
== PIPE_ERROR_OUT_OF_MEMORY
&& do_retry
)
306 svga_context_flush( svga
, NULL
);
308 return retry_draw_arrays( svga
,
321 svga_draw_vbo(struct pipe_context
*pipe
, const struct pipe_draw_info
*info
)
323 struct svga_context
*svga
= svga_context( pipe
);
324 unsigned reduced_prim
= u_reduced_prim( info
->mode
);
325 unsigned count
= info
->count
;
326 enum pipe_error ret
= 0;
327 boolean needed_swtnl
;
329 if (!u_trim_pipe_prim( info
->mode
, &count
))
333 * Mark currently bound target surfaces as dirty
334 * doesn't really matter if it is done before drawing.
336 * TODO If we ever normaly return something other then
337 * true we should not mark it as dirty then.
339 svga_mark_surfaces_dirty(svga_context(pipe
));
341 if (svga
->curr
.reduced_prim
!= reduced_prim
) {
342 svga
->curr
.reduced_prim
= reduced_prim
;
343 svga
->dirty
|= SVGA_NEW_REDUCED_PRIMITIVE
;
346 needed_swtnl
= svga
->state
.sw
.need_swtnl
;
348 svga_update_state_retry( svga
, SVGA_STATE_NEED_SWTNL
);
351 if (svga
->curr
.vs
->base
.id
== svga
->debug
.disable_shader
||
352 svga
->curr
.fs
->base
.id
== svga
->debug
.disable_shader
)
356 if (svga
->state
.sw
.need_swtnl
) {
359 * We're switching from HW to SW TNL. SW TNL will require mapping all
360 * currently bound vertex buffers, some of which may already be
361 * referenced in the current command buffer as result of previous HW
362 * TNL. So flush now, to prevent the context to flush while a referred
363 * vertex buffer is mapped.
366 svga_context_flush(svga
, NULL
);
369 /* Avoid leaking the previous hwtnl bias to swtnl */
370 svga_hwtnl_set_index_bias( svga
->hwtnl
, 0 );
371 ret
= svga_swtnl_draw_vbo( svga
, info
);
374 if (info
->indexed
&& svga
->curr
.ib
.buffer
) {
377 assert(svga
->curr
.ib
.offset
% svga
->curr
.ib
.index_size
== 0);
378 offset
= svga
->curr
.ib
.offset
/ svga
->curr
.ib
.index_size
;
380 ret
= retry_draw_range_elements( svga
,
381 svga
->curr
.ib
.buffer
,
382 svga
->curr
.ib
.index_size
,
387 info
->start
+ offset
,
389 info
->instance_count
,
393 ret
= retry_draw_arrays( svga
,
397 info
->instance_count
,
402 /* XXX: Silence warnings, do something sensible here? */
405 svga_release_user_upl_buffers( svga
);
407 if (SVGA_DEBUG
& DEBUG_FLUSH
) {
408 svga_hwtnl_flush_retry( svga
);
409 svga_context_flush(svga
, NULL
);
414 void svga_init_draw_functions( struct svga_context
*svga
)
416 svga
->pipe
.draw_vbo
= svga_draw_vbo
;