X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fsvga%2Fsvga_pipe_draw.c;h=0b4d41bb807e833855a06205867e2a190595bbc6;hb=542194251c36e88601cb20b96a4094da5d0ae675;hp=71a552862e9f225a09b502cf8e96538ec310d91a;hpb=650e02003fbb5511ec758d993b7ec0a302ee2235;p=mesa.git diff --git a/src/gallium/drivers/svga/svga_pipe_draw.c b/src/gallium/drivers/svga/svga_pipe_draw.c index 71a552862e9..0b4d41bb807 100644 --- a/src/gallium/drivers/svga/svga_pipe_draw.c +++ b/src/gallium/drivers/svga/svga_pipe_draw.c @@ -25,7 +25,8 @@ #include "svga_cmd.h" -#include "pipe/p_inlines.h" +#include "util/u_format.h" +#include "util/u_inlines.h" #include "util/u_prim.h" #include "util/u_time.h" #include "indices/u_indices.h" @@ -33,23 +34,196 @@ #include "svga_hw_reg.h" #include "svga_context.h" #include "svga_screen.h" -#include "svga_winsys.h" #include "svga_draw.h" #include "svga_state.h" #include "svga_swtnl.h" #include "svga_debug.h" +#include "svga_resource_buffer.h" +#include "util/u_upload_mgr.h" + +/** + * Determine the ranges to upload for the user-buffers referenced + * by the next draw command. + * + * TODO: It might be beneficial to support multiple ranges. In that case, + * the struct svga_buffer::uploaded member should be made an array or a + * list, since we need to account for the possibility that different ranges + * may be uploaded to different hardware buffers chosen by the utility + * upload manager. + */ + +static void +svga_user_buffer_range(struct svga_context *svga, + unsigned start, + unsigned count, + unsigned instance_count) +{ + const struct pipe_vertex_element *ve = svga->curr.velems->velem; + int i; + + /* + * Release old uploaded range (if not done already) and + * initialize new ranges. + */ + + for (i=0; i < svga->curr.velems->count; i++) { + struct pipe_vertex_buffer *vb = + &svga->curr.vb[ve[i].vertex_buffer_index]; + + if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { + struct svga_buffer *buffer = svga_buffer(vb->buffer); + + pipe_resource_reference(&buffer->uploaded.buffer, NULL); + buffer->uploaded.start = ~0; + buffer->uploaded.end = 0; + } + } + + for (i=0; i < svga->curr.velems->count; i++) { + struct pipe_vertex_buffer *vb = + &svga->curr.vb[ve[i].vertex_buffer_index]; + + if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { + struct svga_buffer *buffer = svga_buffer(vb->buffer); + unsigned first, size; + unsigned instance_div = ve[i].instance_divisor; + unsigned elemSize = util_format_get_blocksize(ve->src_format); + + svga->dirty |= SVGA_NEW_VBUFFER; + + if (instance_div) { + first = ve[i].src_offset; + count = (instance_count + instance_div - 1) / instance_div; + size = vb->stride * (count - 1) + elemSize; + } else if (vb->stride) { + first = vb->stride * start + ve[i].src_offset; + size = vb->stride * (count - 1) + elemSize; + } else { + /* Only a single vertex! + * Upload with the largest vertex size the hw supports, + * if possible. + */ + first = ve[i].src_offset; + size = MIN2(16, vb->buffer->width0); + } + + buffer->uploaded.start = MIN2(buffer->uploaded.start, first); + buffer->uploaded.end = MAX2(buffer->uploaded.end, first + size); + } + } +} + +/** + * svga_upload_user_buffers - upload parts of user buffers + * + * This function streams a part of a user buffer to hw and fills + * svga_buffer::uploaded with information on the upload. 
+ */ + +static int +svga_upload_user_buffers(struct svga_context *svga, + unsigned start, + unsigned count, + unsigned instance_count) +{ + const struct pipe_vertex_element *ve = svga->curr.velems->velem; + unsigned i; + int ret; + + svga_user_buffer_range(svga, start, count, instance_count); + + for (i=0; i < svga->curr.velems->count; i++) { + struct pipe_vertex_buffer *vb = + &svga->curr.vb[ve[i].vertex_buffer_index]; + + if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { + struct svga_buffer *buffer = svga_buffer(vb->buffer); + boolean flushed; + + /* + * Check if already uploaded. Otherwise go ahead and upload. + */ + + if (buffer->uploaded.buffer) + continue; + + ret = u_upload_buffer( svga->upload_vb, + 0, + buffer->uploaded.start, + buffer->uploaded.end - buffer->uploaded.start, + &buffer->b.b, + &buffer->uploaded.offset, + &buffer->uploaded.buffer, + &flushed); + + if (ret) + return ret; + + if (0) + debug_printf("%s: %d: orig buf %p upl buf %p ofs %d sofs %d" + " sz %d\n", + __FUNCTION__, + i, + buffer, + buffer->uploaded.buffer, + buffer->uploaded.offset, + buffer->uploaded.start, + buffer->uploaded.end - buffer->uploaded.start); + + vb->buffer_offset = buffer->uploaded.offset; + } + } + + return PIPE_OK; +} + +/** + * svga_release_user_upl_buffers - release uploaded parts of user buffers + * + * This function releases the hw copy of the uploaded fraction of the + * user-buffer. It's important to do this as soon as all draw calls + * affecting the uploaded fraction are issued, as this allows for + * efficient reuse of the hardware surface backing the uploaded fraction. + * + * svga_buffer::source_offset is set to 0, and svga_buffer::uploaded::buffer + * is set to 0. + */ + +static void +svga_release_user_upl_buffers(struct svga_context *svga) +{ + unsigned i; + unsigned nr; + + nr = svga->curr.num_vertex_buffers; + + for (i = 0; i < nr; ++i) { + struct pipe_vertex_buffer *vb = &svga->curr.vb[i]; + + if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { + struct svga_buffer *buffer = svga_buffer(vb->buffer); + + buffer->uploaded.start = ~0; + buffer->uploaded.end = 0; + if (buffer->uploaded.buffer) + pipe_resource_reference(&buffer->uploaded.buffer, NULL); + } + } +} static enum pipe_error retry_draw_range_elements( struct svga_context *svga, - struct pipe_buffer *index_buffer, + struct pipe_resource *index_buffer, unsigned index_size, + int index_bias, unsigned min_index, unsigned max_index, unsigned prim, unsigned start, unsigned count, + unsigned instance_count, boolean do_retry ) { enum pipe_error ret = 0; @@ -61,24 +235,22 @@ retry_draw_range_elements( struct svga_context *svga, svga->curr.rast->templ.flatshade, svga->curr.rast->templ.flatshade_first ); + ret = svga_upload_user_buffers( svga, min_index + index_bias, + max_index - min_index + 1, instance_count ); + if (ret != PIPE_OK) + goto retry; ret = svga_update_state( svga, SVGA_STATE_HW_DRAW ); if (ret) goto retry; ret = svga_hwtnl_draw_range_elements( svga->hwtnl, - index_buffer, index_size, + index_buffer, index_size, index_bias, min_index, max_index, - prim, start, count, 0 ); + prim, start, count ); if (ret) goto retry; - if (svga->curr.any_user_vertex_buffers) { - ret = svga_hwtnl_flush( svga->hwtnl ); - if (ret) - goto retry; - } - return PIPE_OK; retry: @@ -87,10 +259,10 @@ retry: if (do_retry) { return retry_draw_range_elements( svga, - index_buffer, index_size, + index_buffer, index_size, index_bias, min_index, max_index, prim, start, count, - FALSE ); + instance_count, FALSE ); } return ret; @@ 
-102,6 +274,7 @@ retry_draw_arrays( struct svga_context *svga, unsigned prim, unsigned start, unsigned count, + unsigned instance_count, boolean do_retry ) { enum pipe_error ret; @@ -113,6 +286,11 @@ retry_draw_arrays( struct svga_context *svga, svga->curr.rast->templ.flatshade, svga->curr.rast->templ.flatshade_first ); + ret = svga_upload_user_buffers( svga, start, count, instance_count ); + + if (ret != PIPE_OK) + goto retry; + ret = svga_update_state( svga, SVGA_STATE_HW_DRAW ); if (ret) goto retry; @@ -122,12 +300,6 @@ retry_draw_arrays( struct svga_context *svga, if (ret) goto retry; - if (svga->curr.any_user_vertex_buffers) { - ret = svga_hwtnl_flush( svga->hwtnl ); - if (ret) - goto retry; - } - return 0; retry: @@ -139,6 +311,7 @@ retry: prim, start, count, + instance_count, FALSE ); } @@ -146,23 +319,17 @@ retry: } - - - -static boolean -svga_draw_range_elements( struct pipe_context *pipe, - struct pipe_buffer *index_buffer, - unsigned index_size, - unsigned min_index, - unsigned max_index, - unsigned prim, unsigned start, unsigned count) +static void +svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) { struct svga_context *svga = svga_context( pipe ); - unsigned reduced_prim = u_reduced_prim(prim); + unsigned reduced_prim = u_reduced_prim( info->mode ); + unsigned count = info->count; enum pipe_error ret = 0; + boolean needed_swtnl; - if (!u_trim_pipe_prim( prim, &count )) - return TRUE; + if (!u_trim_pipe_prim( info->mode, &count )) + return; /* * Mark currently bound target surfaces as dirty @@ -178,84 +345,72 @@ svga_draw_range_elements( struct pipe_context *pipe, svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE; } + needed_swtnl = svga->state.sw.need_swtnl; + svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL ); #ifdef DEBUG if (svga->curr.vs->base.id == svga->debug.disable_shader || svga->curr.fs->base.id == svga->debug.disable_shader) - return 0; + return; #endif - if (svga->state.sw.need_swtnl) - { - ret = svga_swtnl_draw_range_elements( svga, - index_buffer, - index_size, - min_index, max_index, - prim, - start, count ); + if (svga->state.sw.need_swtnl) { + if (!needed_swtnl) { + /* + * We're switching from HW to SW TNL. SW TNL will require mapping all + * currently bound vertex buffers, some of which may already be + * referenced in the current command buffer as result of previous HW + * TNL. So flush now, to prevent the context to flush while a referred + * vertex buffer is mapped. 
+ */ + + svga_context_flush(svga, NULL); + } + + /* Avoid leaking the previous hwtnl bias to swtnl */ + svga_hwtnl_set_index_bias( svga->hwtnl, 0 ); + ret = svga_swtnl_draw_vbo( svga, info ); } else { - if (index_buffer) { + if (info->indexed && svga->curr.ib.buffer) { + unsigned offset; + + assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0); + offset = svga->curr.ib.offset / svga->curr.ib.index_size; + ret = retry_draw_range_elements( svga, - index_buffer, - index_size, - min_index, - max_index, - prim, - start, - count, + svga->curr.ib.buffer, + svga->curr.ib.index_size, + info->index_bias, + info->min_index, + info->max_index, + info->mode, + info->start + offset, + info->count, + info->instance_count, TRUE ); } else { - ret = retry_draw_arrays( svga, - prim, - start, - count, + ret = retry_draw_arrays( svga, + info->mode, + info->start, + info->count, + info->instance_count, TRUE ); } } - if (SVGA_DEBUG & DEBUG_FLUSH) { - static unsigned id; - debug_printf("%s %d\n", __FUNCTION__, id++); - if (id > 1300) - util_time_sleep( 2000 ); + svga_release_user_upl_buffers( svga ); + if (SVGA_DEBUG & DEBUG_FLUSH) { svga_hwtnl_flush_retry( svga ); svga_context_flush(svga, NULL); } - - return ret == PIPE_OK; -} - - -static boolean -svga_draw_elements( struct pipe_context *pipe, - struct pipe_buffer *index_buffer, - unsigned index_size, - unsigned prim, unsigned start, unsigned count) -{ - return svga_draw_range_elements( pipe, index_buffer, - index_size, - 0, 0xffffffff, - prim, start, count ); -} - -static boolean -svga_draw_arrays( struct pipe_context *pipe, - unsigned prim, unsigned start, unsigned count) -{ - return svga_draw_range_elements(pipe, NULL, 0, - start, start + count - 1, - prim, - start, count); } void svga_init_draw_functions( struct svga_context *svga ) { - svga->pipe.draw_arrays = svga_draw_arrays; - svga->pipe.draw_elements = svga_draw_elements; - svga->pipe.draw_range_elements = svga_draw_range_elements; + svga->pipe.draw_vbo = svga_draw_vbo; }
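
Illustrative note, separate from the patch above: the upload-range computation that svga_user_buffer_range() introduces can be modeled in isolation. The sketch below is a minimal, self-contained C approximation under assumed simplified types — `struct range` and `accumulate_range()` are hypothetical names, not the real `struct svga_buffer` / `pipe_vertex_buffer` definitions — showing how the byte range per user buffer is derived for the instanced, strided and zero-stride cases, and how ranges from multiple vertex elements sharing one buffer are merged with MIN2/MAX2, mirroring the logic in the hunk above.

/* Standalone model of the range math in svga_user_buffer_range().
 * Types and names are illustrative only; they do not match the real
 * Mesa structures.
 */
#include <stdio.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

struct range {
   unsigned start;   /* first byte that must be uploaded */
   unsigned end;     /* one past the last byte that must be uploaded */
};

/* Accumulate the byte range referenced by one vertex element for a draw of
 * `count` vertices starting at `start`, or of `instance_count` instances
 * when `instance_div` is non-zero (the three cases handled in the patch).
 */
static void
accumulate_range(struct range *r,
                 unsigned start, unsigned count, unsigned instance_count,
                 unsigned stride, unsigned src_offset, unsigned elem_size,
                 unsigned instance_div, unsigned buffer_size)
{
   unsigned first, size;

   if (instance_div) {
      /* One element per `instance_div` instances. */
      unsigned n = (instance_count + instance_div - 1) / instance_div;
      first = src_offset;
      size = stride * (n - 1) + elem_size;
   } else if (stride) {
      /* Regular per-vertex data. */
      first = stride * start + src_offset;
      size = stride * (count - 1) + elem_size;
   } else {
      /* Zero stride: a single vertex; upload at most 16 bytes. */
      first = src_offset;
      size = MIN2(16, buffer_size);
   }

   /* Merge with the range already accumulated for this buffer. */
   r->start = MIN2(r->start, first);
   r->end = MAX2(r->end, first + size);
}

int main(void)
{
   struct range r = { ~0u, 0 };

   /* Two float4 elements (16 bytes each) interleaved in a 32-byte-stride
    * user buffer, drawing vertices [10, 110).
    */
   accumulate_range(&r, 10, 100, 1, 32, 0, 16, 0, 4096);
   accumulate_range(&r, 10, 100, 1, 32, 16, 16, 0, 4096);

   printf("upload bytes [%u, %u)\n", r.start, r.end);   /* [320, 3520) */
   return 0;
}

Because all elements' ranges are merged into a single [start, end) interval per buffer, widely separated elements can force bytes in between to be uploaded as well; the TODO at the top of the patch notes that tracking multiple ranges per buffer would avoid this.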