+static void
+swr_update_constants(struct swr_context *ctx, enum pipe_shader_type shaderType)
+{
+ swr_draw_context *pDC = &ctx->swrDC;
+
+ const float **constant;
+ uint32_t *num_constants;
+ struct swr_scratch_space *scratch;
+
+ switch (shaderType) {
+ case PIPE_SHADER_VERTEX:
+ constant = pDC->constantVS;
+ num_constants = pDC->num_constantsVS;
+ scratch = &ctx->scratch->vs_constants;
+ break;
+ case PIPE_SHADER_FRAGMENT:
+ constant = pDC->constantFS;
+ num_constants = pDC->num_constantsFS;
+ scratch = &ctx->scratch->fs_constants;
+ break;
+ case PIPE_SHADER_GEOMETRY:
+ constant = pDC->constantGS;
+ num_constants = pDC->num_constantsGS;
+ scratch = &ctx->scratch->gs_constants;
+ break;
+ default:
+ debug_printf("Unsupported shader type constants\n");
+ return;
+ }
+
+ for (UINT i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
+ const pipe_constant_buffer *cb = &ctx->constants[shaderType][i];
+ num_constants[i] = cb->buffer_size;
+ if (cb->buffer) {
+ constant[i] =
+ (const float *)(swr_resource_data(cb->buffer) +
+ cb->buffer_offset);
+ } else {
+ /* Need to copy these constants to scratch space */
+ if (cb->user_buffer && cb->buffer_size) {
+ const void *ptr =
+ ((const uint8_t *)cb->user_buffer + cb->buffer_offset);
+ uint32_t size = AlignUp(cb->buffer_size, 4);
+ ptr = swr_copy_to_scratch_space(ctx, scratch, ptr, size);
+ constant[i] = (const float *)ptr;
+ }
+ }
+ }
+}
+
/*
 * (Re)bind render-target attachment `attachment` to surface `sf` in this
 * context's draw context.  Returns true when the caller must fence the
 * outstanding StoreTile work triggered by detaching the previous target.
 */
static bool
swr_change_rt(struct swr_context *ctx,
              unsigned attachment,
              const struct pipe_surface *sf)
{
   swr_draw_context *pDC = &ctx->swrDC;
   struct SWR_SURFACE_STATE *rt = &pDC->renderTargets[attachment];

   /* Do nothing if the render target hasn't changed */
   if ((!sf || !sf->texture) && (void*)(rt->xpBaseAddress) == nullptr)
      return false;

   /* Deal with disabling RT up front */
   if (!sf || !sf->texture) {
      /* If detaching attachment, mark tiles as RESOLVED so core
       * won't try to load from non-existent target. */
      swr_store_render_target(&ctx->pipe, attachment, SWR_TILE_RESOLVED);
      *rt = {0};
      return true;
   }

   const struct swr_resource *swr = swr_resource(sf->texture);
   const SWR_SURFACE_STATE *swr_surface = &swr->swr;
   SWR_FORMAT fmt = mesa_to_swr_format(sf->format);

   /* A separate stencil plane, when present, overrides the primary
    * surface for the stencil attachment. */
   if (attachment == SWR_ATTACHMENT_STENCIL && swr->secondary.xpBaseAddress) {
      swr_surface = &swr->secondary;
      fmt = swr_surface->format;
   }

   /* Same base address, format, mip level and layer: binding unchanged. */
   if (rt->xpBaseAddress == swr_surface->xpBaseAddress &&
       rt->format == fmt &&
       rt->lod == sf->u.tex.level &&
       rt->arrayIndex == sf->u.tex.first_layer)
      return false;

   bool need_fence = false;

   /* StoreTile for changed target */
   if (rt->xpBaseAddress) {
      /* If changing attachment to a new target, mark tiles as
       * INVALID so they are reloaded from surface. */
      swr_store_render_target(&ctx->pipe, attachment, SWR_TILE_INVALID);
      need_fence = true;
   } else {
      /* if no previous attachment, invalidate tiles that may be marked
       * RESOLVED because of an old attachment */
      swr_invalidate_render_target(&ctx->pipe, attachment, sf->width, sf->height);
      /* no need to set fence here */
   }

   /* Make new attachment */
   *rt = *swr_surface;
   rt->format = fmt;
   rt->lod = sf->u.tex.level;
   rt->arrayIndex = sf->u.tex.first_layer;

   return need_fence;
}
+
+/*
+ * for cases where resources are shared between contexts, invalidate
+ * this ctx's resource. so it can be fetched fresh. Old ctx's resource
+ * is already stored during a flush
+ */
+static inline void
+swr_invalidate_buffers_after_ctx_change(struct pipe_context *pipe)
+{
+ struct swr_context *ctx = swr_context(pipe);
+
+ for (uint32_t i = 0; i < ctx->framebuffer.nr_cbufs; i++) {
+ struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
+ if (cb) {
+ struct swr_resource *res = swr_resource(cb->texture);
+ if (res->curr_pipe != pipe) {
+ /* if curr_pipe is NULL (first use), status should not be WRITE */
+ assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
+ if (res->status & SWR_RESOURCE_WRITE) {
+ swr_invalidate_render_target(pipe, i, cb->width, cb->height);
+ }
+ }
+ res->curr_pipe = pipe;
+ }
+ }
+ if (ctx->framebuffer.zsbuf) {
+ struct pipe_surface *zb = ctx->framebuffer.zsbuf;
+ if (zb) {
+ struct swr_resource *res = swr_resource(zb->texture);
+ if (res->curr_pipe != pipe) {
+ /* if curr_pipe is NULL (first use), status should not be WRITE */
+ assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
+ if (res->status & SWR_RESOURCE_WRITE) {
+ swr_invalidate_render_target(pipe, SWR_ATTACHMENT_DEPTH, zb->width, zb->height);
+ swr_invalidate_render_target(pipe, SWR_ATTACHMENT_STENCIL, zb->width, zb->height);
+ }
+ }
+ res->curr_pipe = pipe;
+ }
+ }
+}
+
+static inline void
+swr_user_vbuf_range(const struct pipe_draw_info *info,
+ const struct swr_vertex_element_state *velems,
+ const struct pipe_vertex_buffer *vb,
+ uint32_t i,
+ uint32_t *totelems,
+ uint32_t *base,
+ uint32_t *size)
+{
+ /* FIXME: The size is too large - we don't access the full extra stride. */
+ unsigned elems;
+ if (velems->instanced_bufs & (1U << i)) {
+ elems = info->instance_count / velems->min_instance_div[i] + 1;
+ *totelems = info->start_instance + elems;
+ *base = info->start_instance * vb->stride;
+ *size = elems * vb->stride;
+ } else if (vb->stride) {
+ elems = info->max_index - info->min_index + 1;
+ *totelems = (info->max_index + info->index_bias) + 1;
+ *base = (info->min_index + info->index_bias) * vb->stride;
+ *size = elems * vb->stride;
+ } else {
+ *totelems = 1;
+ *base = 0;
+ *size = velems->stream_pitch[i];
+ }
+}
+
+static void
+swr_update_poly_stipple(struct swr_context *ctx)
+{
+ struct swr_draw_context *pDC = &ctx->swrDC;
+
+ assert(sizeof(ctx->poly_stipple.pipe.stipple) == sizeof(pDC->polyStipple));
+ memcpy(pDC->polyStipple,
+ ctx->poly_stipple.pipe.stipple,
+ sizeof(ctx->poly_stipple.pipe.stipple));
+}
+