#include "util/u_draw.h"
#include "util/u_prim.h"
+#include <algorithm>
+#include <iostream>
/*
* Draw vertex arrays, with optional indexing, optional instancing.
*/
return;
}
- /* Update derived state, pass draw info to update function */
+ /* If indexed draw, force vertex validation since index buffer comes
+ * from draw info. */
+ if (info->index_size)
+ ctx->dirty |= SWR_NEW_VERTEX;
+
+ /* Update derived state, pass draw info to update function. */
swr_update_derived(pipe, info);
swr_update_draw_context(ctx);
+ struct pipe_draw_info resolved_info;
+ /* DrawTransformFeedback */
+ if (info->count_from_stream_output) {
+ // Trick copied from softpipe to modify the const struct *info in place.
+ memcpy(&resolved_info, (void*)info, sizeof(struct pipe_draw_info));
+ resolved_info.count = ctx->so_primCounter * resolved_info.vertices_per_patch;
+ resolved_info.max_index = resolved_info.count - 1;
+ info = &resolved_info;
+ }
+
if (ctx->vs->pipe.stream_output.num_outputs) {
if (!ctx->vs->soFunc[info->mode]) {
STREAMOUT_COMPILE_STATE state = {0};
offsets[output_buffer] = so->output[i].dst_offset;
}
+ unsigned attrib_slot = so->output[i].register_index;
+ attrib_slot = swr_so_adjust_attrib(attrib_slot, ctx->vs);
+
state.stream.decl[num].bufferIndex = output_buffer;
- state.stream.decl[num].attribSlot = so->output[i].register_index - 1;
+ state.stream.decl[num].attribSlot = attrib_slot;
state.stream.decl[num].componentMask =
((1 << so->output[i].num_components) - 1)
<< so->output[i].start_component;
}
struct swr_vertex_element_state *velems = ctx->velems;
- velems->fsState.cutIndex = info->restart_index;
+ if (info->primitive_restart)
+ velems->fsState.cutIndex = info->restart_index;
+ else
+ velems->fsState.cutIndex = 0;
velems->fsState.bEnableCutIndex = info->primitive_restart;
velems->fsState.bPartialVertexBuffer = (info->min_index > 0);
* XXX setup provokingVertex & topologyProvokingVertex */
SWR_FRONTEND_STATE feState = {0};
- feState.vsVertexSize =
- VERTEX_ATTRIB_START_SLOT +
- + ctx->vs->info.base.num_outputs
- - (ctx->vs->info.base.writes_position ? 1 : 0);
+ // feState.vsVertexSize seeds the PA size that is used as an interface
+ // between all the shader stages, so it has to be large enough to
+ // cover every stage-to-stage interface.
+
+ // max of frontend shaders num_outputs
+ feState.vsVertexSize = ctx->vs->info.base.num_outputs;
+ if (ctx->gs) {
+ feState.vsVertexSize = std::max(feState.vsVertexSize, (uint32_t)ctx->gs->info.base.num_outputs);
+ }
+ if (ctx->tcs) {
+ feState.vsVertexSize = std::max(feState.vsVertexSize, (uint32_t)ctx->tcs->info.base.num_outputs);
+ }
+ if (ctx->tes) {
+ feState.vsVertexSize = std::max(feState.vsVertexSize, (uint32_t)ctx->tes->info.base.num_outputs);
+ }
+
+
+ if (ctx->vs->info.base.num_outputs) {
+ // gs does not adjust for position in SGV slot at input from vs
+ if (!ctx->gs && !ctx->tcs && !ctx->tes)
+ feState.vsVertexSize--;
+ }
+
+ // other (non-SGV) slots start at VERTEX_ATTRIB_START_SLOT
+ feState.vsVertexSize += VERTEX_ATTRIB_START_SLOT;
+
+ // The PA in the clipper does not handle BE vertex sizes
+ // different from FE. Increase the vertex size only for the cases that need it.
+
+ // primid needs a slot
+ if (ctx->fs->info.base.uses_primid)
+ feState.vsVertexSize++;
+ // sprite coord enable
+ if (ctx->rasterizer->sprite_coord_enable)
+ feState.vsVertexSize++;
if (ctx->rasterizer->flatshade_first) {
feState.provokingVertex = {1, 0, 0};
if (info->index_size)
ctx->api.pfnSwrDrawIndexedInstanced(ctx->swrContext,
- swr_convert_prim_topology(info->mode),
+ swr_convert_prim_topology(info->mode, info->vertices_per_patch),
info->count,
info->instance_count,
info->start,
info->start_instance);
else
ctx->api.pfnSwrDrawInstanced(ctx->swrContext,
- swr_convert_prim_topology(info->mode),
+ swr_convert_prim_topology(info->mode, info->vertices_per_patch),
info->count,
info->instance_count,
info->start,
info->start_instance);
- /* On large client-buffer draw, we used client buffer directly, without
+ /* On client-buffer draw, we used client buffer directly, without
* copy. Block until draw is finished.
* VMD is an example application that benefits from this. */
- if (ctx->dirty & SWR_LARGE_CLIENT_DRAW) {
+ if (ctx->dirty & SWR_BLOCK_CLIENT_DRAW) {
struct swr_screen *screen = swr_screen(pipe->screen);
swr_fence_submit(ctx, screen->flush_fence);
swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
{
struct swr_context *ctx = swr_context(pipe);
struct swr_screen *screen = swr_screen(pipe->screen);
- struct pipe_surface *cb = ctx->framebuffer.cbufs[0];
- /* If the current renderTarget is the display surface, store tiles back to
- * the surface, in preparation for present (swr_flush_frontbuffer).
- * Other renderTargets get stored back when attachment changes or
- * swr_surface_destroy */
- if (cb && swr_resource(cb->texture)->display_target)
- swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);
+ for (int i=0; i < ctx->framebuffer.nr_cbufs; i++) {
+ struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
+ if (cb) {
+ swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);
+ }
+ }
+ if (ctx->framebuffer.zsbuf) {
+ swr_store_dirty_resource(pipe, ctx->framebuffer.zsbuf->texture,
+ SWR_TILE_RESOLVED);
+ }
if (fence)
swr_fence_reference(pipe->screen, fence, screen->flush_fence);
struct SWR_SURFACE_STATE *renderTarget = &pDC->renderTargets[attachment];
/* Only proceed if there's a valid surface to store to */
- if (renderTarget->pBaseAddress) {
+ if (renderTarget->xpBaseAddress) {
swr_update_draw_context(ctx);
SWR_RECT full_rect =
{0, 0,
swr_draw_context *pDC = &ctx->swrDC;
SWR_SURFACE_STATE *renderTargets = pDC->renderTargets;
for (uint32_t i = 0; i < SWR_NUM_ATTACHMENTS; i++)
- if (renderTargets[i].pBaseAddress == spr->swr.pBaseAddress ||
- (spr->secondary.pBaseAddress &&
- renderTargets[i].pBaseAddress == spr->secondary.pBaseAddress)) {
+ if (renderTargets[i].xpBaseAddress == spr->swr.xpBaseAddress ||
+ (spr->secondary.xpBaseAddress &&
+ renderTargets[i].xpBaseAddress == spr->secondary.xpBaseAddress)) {
swr_store_render_target(pipe, i, post_tile_state);
/* Mesa thinks depth/stencil are fused, so we'll never get an