#define R600_NUM_ATOMS 52
-#define R600_MAX_VIEWPORTS 16
-
/* read caches */
#define R600_CONTEXT_INV_VERTEX_CACHE (R600_CONTEXT_PRIVATE_FLAG << 0)
#define R600_CONTEXT_INV_TEX_CACHE (R600_CONTEXT_PRIVATE_FLAG << 1)
r600_query.h \
r600_streamout.c \
r600_texture.c \
+ r600_viewport.c \
radeon_uvd.c \
radeon_uvd.h \
radeon_vce_40_2_2.c \
LIST_INITHEAD(&rctx->texture_buffers);
r600_init_context_texture_functions(rctx);
+ r600_init_viewport_functions(rctx);
r600_streamout_init(rctx);
r600_query_init(rctx);
cayman_init_msaa(&rctx->b);
#define DBG_MONOLITHIC_SHADERS (1llu << 47)
#define R600_MAP_BUFFER_ALIGNMENT 64
+#define R600_MAX_VIEWPORTS 16
struct r600_common_context;
struct r600_perfcounters;
+struct tgsi_shader_info;
struct radeon_shader_reloc {
char name[32];
int num_prims_gen_queries;
};
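+/* A scissor rectangle with signed coordinates, so it can hold the unclamped
+ * window-space bounds derived from a viewport transform. (Descriptive comment
+ * added for clarity.)
+ */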
+struct r600_signed_scissor {
+ int minx;
+ int miny;
+ int maxx;
+ int maxy;
+};
+
+struct r600_scissors {
+ struct r600_atom atom;
+ unsigned dirty_mask;
+ struct pipe_scissor_state states[R600_MAX_VIEWPORTS];
+};
+
+struct r600_viewports {
+ struct r600_atom atom;
+ unsigned dirty_mask;
+ struct pipe_viewport_state states[R600_MAX_VIEWPORTS];
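+ /* Window-space bounds of each viewport, used to derive the emitted
+  * scissors and the guard band. (Descriptive comment added for clarity.)
+  */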
+ struct r600_signed_scissor as_scissor[R600_MAX_VIEWPORTS];
+};
+
struct r600_ring {
struct radeon_winsys_cs *cs;
void (*flush)(void *ctx, unsigned flags,
/* States. */
struct r600_streamout streamout;
+ struct r600_scissors scissors;
+ struct r600_viewports viewports;
+ bool scissor_enabled; /* mirrors the rasterizer scissor enable */
+ bool vs_writes_viewport_index; /* whether the bound VS/TES/GS writes VIEWPORT_INDEX */
/* Additional context states. */
unsigned flags; /* flush flags */
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen);
void r600_init_context_texture_functions(struct r600_common_context *rctx);
+/* r600_viewport.c */
+void r600_set_scissor_enable(struct r600_common_context *rctx, bool enable);
+void r600_update_vs_writes_viewport_index(struct r600_common_context *rctx,
+ struct tgsi_shader_info *info);
+void r600_init_viewport_functions(struct r600_common_context *rctx);
+
/* cayman_msaa.c */
extern const uint32_t eg_sample_locs_2x[4];
extern const unsigned eg_max_dist_2x;
--- /dev/null
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "r600_cs.h"
+#include "tgsi/tgsi_scan.h"
+
+static void r600_set_scissor_states(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_scissors,
+ const struct pipe_scissor_state *state)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ int i;
+
+ for (i = 0; i < num_scissors; i++)
+ rctx->scissors.states[start_slot + i] = state[i];
+
+ if (!rctx->scissor_enabled)
+ return;
+
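+ /* Mark slots [start_slot, start_slot + num_scissors) dirty and re-emit
+  * the scissor atom.
+  */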
+ rctx->scissors.dirty_mask |= ((1 << num_scissors) - 1) << start_slot;
+ rctx->set_atom_dirty(rctx, &rctx->scissors.atom, true);
+}
+
+static void r600_get_scissor_from_viewport(const struct pipe_viewport_state *vp,
+ struct r600_signed_scissor *scissor)
+{
+ int tmp;
+
+ /* Convert (-1, -1) and (1, 1) from clip space into window space. */
+ scissor->minx = -vp->scale[0] + vp->translate[0];
+ scissor->miny = -vp->scale[1] + vp->translate[1];
+ scissor->maxx = vp->scale[0] + vp->translate[0];
+ scissor->maxy = vp->scale[1] + vp->translate[1];
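+
+ /* For example, a hypothetical 1920x1080 viewport at the origin (no Y flip)
+  * has scale = (960, 540) and translate = (960, 540), which maps the clip
+  * rectangle to (0, 0) - (1920, 1080) in window space.
+  */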
+
+ /* r600_draw_rectangle sets this. Disable the scissor. */
+ if (scissor->minx == -1 && scissor->miny == -1 &&
+ scissor->maxx == 1 && scissor->maxy == 1) {
+ scissor->minx = scissor->miny = 0;
+ scissor->maxx = scissor->maxy = 16384;
+ }
+
+ /* Handle inverted viewports. */
+ if (scissor->minx > scissor->maxx) {
+ tmp = scissor->minx;
+ scissor->minx = scissor->maxx;
+ scissor->maxx = tmp;
+ }
+ if (scissor->miny > scissor->maxy) {
+ tmp = scissor->miny;
+ scissor->miny = scissor->maxy;
+ scissor->maxy = tmp;
+ }
+}
+
+static void r600_clamp_scissor(struct pipe_scissor_state *out,
+ struct r600_signed_scissor *scissor)
+{
+ out->minx = CLAMP(scissor->minx, 0, 16384);
+ out->miny = CLAMP(scissor->miny, 0, 16384);
+ out->maxx = CLAMP(scissor->maxx, 0, 16384);
+ out->maxy = CLAMP(scissor->maxy, 0, 16384);
+}
+
+static void r600_clip_scissor(struct pipe_scissor_state *out,
+ struct pipe_scissor_state *clip)
+{
+ out->minx = MAX2(out->minx, clip->minx);
+ out->miny = MAX2(out->miny, clip->miny);
+ out->maxx = MIN2(out->maxx, clip->maxx);
+ out->maxy = MIN2(out->maxy, clip->maxy);
+}
+
+static void r600_scissor_make_union(struct r600_signed_scissor *out,
+ struct r600_signed_scissor *in)
+{
+ out->minx = MIN2(out->minx, in->minx);
+ out->miny = MIN2(out->miny, in->miny);
+ out->maxx = MAX2(out->maxx, in->maxx);
+ out->maxy = MAX2(out->maxy, in->maxy);
+}
+
+static void r600_emit_one_scissor(struct radeon_winsys_cs *cs,
+ struct r600_signed_scissor *vp_scissor,
+ struct pipe_scissor_state *scissor)
+{
+ struct pipe_scissor_state final;
+
+ /* Since the guard band disables clipping, we have to clip per-pixel
+ * using a scissor.
+ */
+ r600_clamp_scissor(&final, vp_scissor);
+
+ if (scissor)
+ r600_clip_scissor(&final, scissor);
+
+ radeon_emit(cs, S_028250_TL_X(final.minx) |
+ S_028250_TL_Y(final.miny) |
+ S_028250_WINDOW_OFFSET_DISABLE(1));
+ radeon_emit(cs, S_028254_BR_X(final.maxx) |
+ S_028254_BR_Y(final.maxy));
+}
+
+/* The hardware viewport coordinate range is [-MAX, MAX]. */
+#define R600_MAX_VIEWPORT_RANGE 32768
+
+static void r600_emit_guardband(struct r600_common_context *rctx,
+ struct r600_signed_scissor *vp_as_scissor)
+{
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct pipe_viewport_state vp;
+ float left, top, right, bottom, max_range, guardband_x, guardband_y;
+
+ /* Reconstruct the viewport transformation from the scissor. */
+ vp.translate[0] = (vp_as_scissor->minx + vp_as_scissor->maxx) / 2.0;
+ vp.translate[1] = (vp_as_scissor->miny + vp_as_scissor->maxy) / 2.0;
+ vp.scale[0] = vp_as_scissor->maxx - vp.translate[0];
+ vp.scale[1] = vp_as_scissor->maxy - vp.translate[1];
+
+ /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
+ if (vp_as_scissor->minx == vp_as_scissor->maxx)
+ vp.scale[0] = 0.5;
+ if (vp_as_scissor->miny == vp_as_scissor->maxy)
+ vp.scale[1] = 0.5;
+
+ /* Find the biggest guard band that is inside the supported viewport
+ * range. The guard band is specified as a horizontal and vertical
+ * distance from (0,0) in clip space.
+ *
+ * This is done by applying the inverse viewport transformation
+ * on the viewport limits to get those limits in clip space.
+ *
+ * Use a limit one pixel smaller to allow for some precision error.
+ */
+ max_range = R600_MAX_VIEWPORT_RANGE - 1;
+ left = (-max_range - vp.translate[0]) / vp.scale[0];
+ right = ( max_range - vp.translate[0]) / vp.scale[0];
+ top = (-max_range - vp.translate[1]) / vp.scale[1];
+ bottom = ( max_range - vp.translate[1]) / vp.scale[1];
+
+ assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);
+
+ guardband_x = MIN2(-left, right);
+ guardband_y = MIN2(-top, bottom);
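+
+ /* For the hypothetical 1920x1080 viewport above: left = (-32767 - 960) / 960
+  * is roughly -35.1 and right = (32767 - 960) / 960 is roughly 33.1, so
+  * guardband_x ends up around 33.1 viewport half-widths.
+  */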
+
+ /* If any of the GB registers is updated, all of them must be updated. */
+ radeon_set_context_reg_seq(cs, CM_R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
+ radeon_emit(cs, fui(guardband_y)); /* R_028BE8_PA_CL_GB_VERT_CLIP_ADJ */
+ radeon_emit(cs, fui(1.0)); /* R_028BEC_PA_CL_GB_VERT_DISC_ADJ */
+ radeon_emit(cs, fui(guardband_x)); /* R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ */
+ radeon_emit(cs, fui(1.0)); /* R_028BF4_PA_CL_GB_HORZ_DISC_ADJ */
+}
+
+static void r600_emit_scissors(struct r600_common_context *rctx, struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct pipe_scissor_state *states = rctx->scissors.states;
+ unsigned mask = rctx->scissors.dirty_mask;
+ bool scissor_enabled = rctx->scissor_enabled;
+ struct r600_signed_scissor max_vp_scissor;
+ int i;
+
+ /* The simple case: Only 1 viewport is active. */
+ if (!rctx->vs_writes_viewport_index) {
+ struct r600_signed_scissor *vp = &rctx->viewports.as_scissor[0];
+
+ if (!(mask & 1))
+ return;
+
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
+ r600_emit_one_scissor(cs, vp, scissor_enabled ? &states[0] : NULL);
+ r600_emit_guardband(rctx, vp);
+ rctx->scissors.dirty_mask &= ~1; /* clear one bit */
+ return;
+ }
+
+ /* Shaders can draw to any viewport. Make a union of all viewports. */
+ max_vp_scissor = rctx->viewports.as_scissor[0];
+ for (i = 1; i < R600_MAX_VIEWPORTS; i++)
+ r600_scissor_make_union(&max_vp_scissor,
+ &rctx->viewports.as_scissor[i]);
+
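+ /* u_bit_scan_consecutive_range pops the lowest run of consecutive dirty
+  * bits from the mask; each viewport slot uses 2 scissor dwords (TL, BR),
+  * so a whole run can be written with one register sequence.
+  */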
+ while (mask) {
+ int start, count, i;
+
+ u_bit_scan_consecutive_range(&mask, &start, &count);
+
+ radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL +
+ start * 4 * 2, count * 2);
+ for (i = start; i < start+count; i++) {
+ r600_emit_one_scissor(cs, &rctx->viewports.as_scissor[i],
+ scissor_enabled ? &states[i] : NULL);
+ }
+ }
+ r600_emit_guardband(rctx, &max_vp_scissor);
+ rctx->scissors.dirty_mask = 0;
+}
+
+static void r600_set_viewport_states(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *state)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ int i;
+
+ for (i = 0; i < num_viewports; i++) {
+ unsigned index = start_slot + i;
+
+ rctx->viewports.states[index] = state[i];
+ r600_get_scissor_from_viewport(&state[i],
+ &rctx->viewports.as_scissor[index]);
+ }
+
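+ /* Changing a viewport also changes its derived scissor and guard band,
+  * so both atoms have to be re-emitted.
+  */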
+ rctx->viewports.dirty_mask |= ((1 << num_viewports) - 1) << start_slot;
+ rctx->scissors.dirty_mask |= ((1 << num_viewports) - 1) << start_slot;
+ rctx->set_atom_dirty(rctx, &rctx->viewports.atom, true);
+ rctx->set_atom_dirty(rctx, &rctx->scissors.atom, true);
+}
+
+static void r600_emit_viewports(struct r600_common_context *rctx, struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct pipe_viewport_state *states = rctx->viewports.states;
+ unsigned mask = rctx->viewports.dirty_mask;
+
+ /* The simple case: Only 1 viewport is active. */
+ if (!rctx->vs_writes_viewport_index) {
+ if (!(mask & 1))
+ return;
+
+ radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
+ radeon_emit(cs, fui(states[0].scale[0]));
+ radeon_emit(cs, fui(states[0].translate[0]));
+ radeon_emit(cs, fui(states[0].scale[1]));
+ radeon_emit(cs, fui(states[0].translate[1]));
+ radeon_emit(cs, fui(states[0].scale[2]));
+ radeon_emit(cs, fui(states[0].translate[2]));
+ rctx->viewports.dirty_mask &= ~1; /* clear one bit */
+ return;
+ }
+
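+ /* Each viewport takes 6 consecutive dwords (X/Y/Z scale and offset), so
+  * emit each run of dirty viewports with a single register sequence.
+  */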
+ while (mask) {
+ int start, count, i;
+
+ u_bit_scan_consecutive_range(&mask, &start, &count);
+
+ radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
+ start * 4 * 6, count * 6);
+ for (i = start; i < start+count; i++) {
+ radeon_emit(cs, fui(states[i].scale[0]));
+ radeon_emit(cs, fui(states[i].translate[0]));
+ radeon_emit(cs, fui(states[i].scale[1]));
+ radeon_emit(cs, fui(states[i].translate[1]));
+ radeon_emit(cs, fui(states[i].scale[2]));
+ radeon_emit(cs, fui(states[i].translate[2]));
+ }
+ }
+ rctx->viewports.dirty_mask = 0;
+}
+
+void r600_set_scissor_enable(struct r600_common_context *rctx, bool enable)
+{
+ if (rctx->scissor_enabled != enable) {
+ rctx->scissor_enabled = enable;
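+ /* The emitted rectangles depend on whether the user scissor is applied,
+  * so every slot has to be re-emitted.
+  */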
+ rctx->scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ rctx->set_atom_dirty(rctx, &rctx->scissors.atom, true);
+ }
+}
+
+/**
+ * Normally, we only emit 1 viewport and 1 scissor if no shader is using
+ * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
+ * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
+ * called to emit the rest.
+ */
+void r600_update_vs_writes_viewport_index(struct r600_common_context *rctx,
+ struct tgsi_shader_info *info)
+{
+ if (!info)
+ return;
+
+ rctx->vs_writes_viewport_index = info->writes_viewport_index;
+ if (!rctx->vs_writes_viewport_index)
+ return;
+
+ if (rctx->scissors.dirty_mask)
+ rctx->set_atom_dirty(rctx, &rctx->scissors.atom, true);
+ if (rctx->viewports.dirty_mask)
+ rctx->set_atom_dirty(rctx, &rctx->viewports.atom, true);
+}
+
+void r600_init_viewport_functions(struct r600_common_context *rctx)
+{
+ rctx->scissors.atom.emit = r600_emit_scissors;
+ rctx->viewports.atom.emit = r600_emit_viewports;
+
+ rctx->b.set_scissor_states = r600_set_scissor_states;
+ rctx->b.set_viewport_states = r600_set_viewport_states;
+}
/*CIK+*/
#define R_0300FC_CP_STRMOUT_CNTL 0x0300FC
+#define CM_R_028BE8_PA_CL_GB_VERT_CLIP_ADJ 0x28be8
+#define R_02843C_PA_CL_VPORT_XSCALE 0x02843C
+
+#define R_028250_PA_SC_VPORT_SCISSOR_0_TL 0x028250
+#define S_028250_TL_X(x) (((x) & 0x7FFF) << 0)
+#define G_028250_TL_X(x) (((x) >> 0) & 0x7FFF)
+#define C_028250_TL_X 0xFFFF8000
+#define S_028250_TL_Y(x) (((x) & 0x7FFF) << 16)
+#define G_028250_TL_Y(x) (((x) >> 16) & 0x7FFF)
+#define C_028250_TL_Y 0x8000FFFF
+#define S_028250_WINDOW_OFFSET_DISABLE(x) (((x) & 0x1) << 31)
+#define G_028250_WINDOW_OFFSET_DISABLE(x) (((x) >> 31) & 0x1)
+#define C_028250_WINDOW_OFFSET_DISABLE 0x7FFFFFFF
+#define S_028254_BR_X(x) (((x) & 0x7FFF) << 0)
+#define G_028254_BR_X(x) (((x) >> 0) & 0x7FFF)
+#define C_028254_BR_X 0xFFFF8000
+#define S_028254_BR_Y(x) (((x) & 0x7FFF) << 16)
+#define G_028254_BR_Y(x) (((x) >> 16) & 0x7FFF)
+#define C_028254_BR_Y 0x8000FFFF
+
#endif
util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref.state);
util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader.cso);
util_blitter_save_sample_mask(sctx->blitter, sctx->sample_mask.sample_mask);
- util_blitter_save_viewport(sctx->blitter, &sctx->viewports.states[0]);
- util_blitter_save_scissor(sctx->blitter, &sctx->scissors.states[0]);
+ util_blitter_save_viewport(sctx->blitter, &sctx->b.viewports.states[0]);
+ util_blitter_save_scissor(sctx->blitter, &sctx->b.scissors.states[0]);
}
if (op & SI_SAVE_FRAMEBUFFER)
si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
- ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
r600_postflush_resume_features(&ctx->b);
return 8;
case PIPE_CAP_MAX_VIEWPORTS:
- return SI_MAX_VIEWPORTS;
+ return R600_MAX_VIEWPORTS;
/* Timer queries, present when the clock frequency is non zero. */
case PIPE_CAP_QUERY_TIMESTAMP:
#define SI_IS_TRACE_POINT(x) (((x) & 0xcafe0000) == 0xcafe0000)
#define SI_GET_TRACE_POINT_ID(x) ((x) & 0xffff)
-#define SI_MAX_VIEWPORTS 16
#define SI_MAX_BORDER_COLORS 4096
struct si_compute;
uint16_t sample_mask;
};
-struct si_scissors {
- struct r600_atom atom;
- unsigned dirty_mask;
- struct pipe_scissor_state states[SI_MAX_VIEWPORTS];
-};
-
-struct si_signed_scissor {
- int minx;
- int miny;
- int maxx;
- int maxy;
-};
-
-struct si_viewports {
- struct r600_atom atom;
- unsigned dirty_mask;
- struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
- struct si_signed_scissor as_scissor[SI_MAX_VIEWPORTS];
-};
-
/* A shader state consists of the shader selector, which is a constant state
* object shared by multiple contexts and shouldn't be modified, and
* the current shader variant selected for this context.
struct r600_atom clip_regs;
struct si_clip_state clip_state;
struct si_shader_data shader_userdata;
- struct si_scissors scissors;
- struct si_viewports viewports;
struct si_stencil_ref stencil_ref;
struct r600_atom spi_map;
}
/*
- * Clipping, scissors and viewport
+ * Clipping
*/
static void si_set_clip_state(struct pipe_context *ctx,
S_028AB4_REUSE_OFF(info->writes_viewport_index));
}
-static void si_set_scissor_states(struct pipe_context *ctx,
- unsigned start_slot,
- unsigned num_scissors,
- const struct pipe_scissor_state *state)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- int i;
-
- for (i = 0; i < num_scissors; i++)
- sctx->scissors.states[start_slot + i] = state[i];
-
- if (!sctx->queued.named.rasterizer ||
- !sctx->queued.named.rasterizer->scissor_enable)
- return;
-
- sctx->scissors.dirty_mask |= ((1 << num_scissors) - 1) << start_slot;
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
-}
-
-static void si_get_scissor_from_viewport(const struct pipe_viewport_state *vp,
- struct si_signed_scissor *scissor)
-{
- int tmp;
-
- /* Convert (-1, -1) and (1, 1) from clip space into window space. */
- scissor->minx = -vp->scale[0] + vp->translate[0];
- scissor->miny = -vp->scale[1] + vp->translate[1];
- scissor->maxx = vp->scale[0] + vp->translate[0];
- scissor->maxy = vp->scale[1] + vp->translate[1];
-
- /* r600_draw_rectangle sets this. Disable the scissor. */
- if (scissor->minx == -1 && scissor->miny == -1 &&
- scissor->maxx == 1 && scissor->maxy == 1) {
- scissor->minx = scissor->miny = 0;
- scissor->maxx = scissor->maxy = 16384;
- }
-
- /* Handle inverted viewports. */
- if (scissor->minx > scissor->maxx) {
- tmp = scissor->minx;
- scissor->minx = scissor->maxx;
- scissor->maxx = tmp;
- }
- if (scissor->miny > scissor->maxy) {
- tmp = scissor->miny;
- scissor->miny = scissor->maxy;
- scissor->maxy = tmp;
- }
-}
-
-static void si_clamp_scissor(struct pipe_scissor_state *out,
- struct si_signed_scissor *scissor)
-{
- out->minx = CLAMP(scissor->minx, 0, 16384);
- out->miny = CLAMP(scissor->miny, 0, 16384);
- out->maxx = CLAMP(scissor->maxx, 0, 16384);
- out->maxy = CLAMP(scissor->maxy, 0, 16384);
-}
-
-static void si_clip_scissor(struct pipe_scissor_state *out,
- struct pipe_scissor_state *clip)
-{
- out->minx = MAX2(out->minx, clip->minx);
- out->miny = MAX2(out->miny, clip->miny);
- out->maxx = MIN2(out->maxx, clip->maxx);
- out->maxy = MIN2(out->maxy, clip->maxy);
-}
-
-static void si_scissor_make_union(struct si_signed_scissor *out,
- struct si_signed_scissor *in)
-{
- out->minx = MIN2(out->minx, in->minx);
- out->miny = MIN2(out->miny, in->miny);
- out->maxx = MAX2(out->maxx, in->maxx);
- out->maxy = MAX2(out->maxy, in->maxy);
-}
-
-static void si_emit_one_scissor(struct radeon_winsys_cs *cs,
- struct si_signed_scissor *vp_scissor,
- struct pipe_scissor_state *scissor)
-{
- struct pipe_scissor_state final;
-
- /* Since the guard band disables clipping, we have to clip per-pixel
- * using a scissor.
- */
- si_clamp_scissor(&final, vp_scissor);
-
- if (scissor)
- si_clip_scissor(&final, scissor);
-
- radeon_emit(cs, S_028250_TL_X(final.minx) |
- S_028250_TL_Y(final.miny) |
- S_028250_WINDOW_OFFSET_DISABLE(1));
- radeon_emit(cs, S_028254_BR_X(final.maxx) |
- S_028254_BR_Y(final.maxy));
-}
-
-/* the range is [-MAX, MAX] */
-#define SI_MAX_VIEWPORT_RANGE 32768
-
-static void si_emit_guardband(struct si_context *sctx,
- struct si_signed_scissor *vp_as_scissor)
-{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
- struct pipe_viewport_state vp;
- float left, top, right, bottom, max_range, guardband_x, guardband_y;
-
- /* Reconstruct the viewport transformation from the scissor. */
- vp.translate[0] = (vp_as_scissor->minx + vp_as_scissor->maxx) / 2.0;
- vp.translate[1] = (vp_as_scissor->miny + vp_as_scissor->maxy) / 2.0;
- vp.scale[0] = vp_as_scissor->maxx - vp.translate[0];
- vp.scale[1] = vp_as_scissor->maxy - vp.translate[1];
-
- /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
- if (vp_as_scissor->minx == vp_as_scissor->maxx)
- vp.scale[0] = 0.5;
- if (vp_as_scissor->miny == vp_as_scissor->maxy)
- vp.scale[1] = 0.5;
-
- /* Find the biggest guard band that is inside the supported viewport
- * range. The guard band is specified as a horizontal and vertical
- * distance from (0,0) in clip space.
- *
- * This is done by applying the inverse viewport transformation
- * on the viewport limits to get those limits in clip space.
- *
- * Use a limit one pixel smaller to allow for some precision error.
- */
- max_range = SI_MAX_VIEWPORT_RANGE - 1;
- left = (-max_range - vp.translate[0]) / vp.scale[0];
- right = ( max_range - vp.translate[0]) / vp.scale[0];
- top = (-max_range - vp.translate[1]) / vp.scale[1];
- bottom = ( max_range - vp.translate[1]) / vp.scale[1];
-
- assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);
-
- guardband_x = MIN2(-left, right);
- guardband_y = MIN2(-top, bottom);
-
- /* If any of the GB registers is updated, all of them must be updated. */
- radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
- radeon_emit(cs, fui(guardband_y)); /* R_028BE8_PA_CL_GB_VERT_CLIP_ADJ */
- radeon_emit(cs, fui(1.0)); /* R_028BEC_PA_CL_GB_VERT_DISC_ADJ */
- radeon_emit(cs, fui(guardband_x)); /* R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ */
- radeon_emit(cs, fui(1.0)); /* R_028BF4_PA_CL_GB_HORZ_DISC_ADJ */
-}
-
-static void si_emit_scissors(struct si_context *sctx, struct r600_atom *atom)
-{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
- struct pipe_scissor_state *states = sctx->scissors.states;
- unsigned mask = sctx->scissors.dirty_mask;
- bool scissor_enable = sctx->queued.named.rasterizer->scissor_enable;
- struct si_signed_scissor max_vp_scissor;
- int i;
-
- /* The simple case: Only 1 viewport is active. */
- if (!si_get_vs_info(sctx)->writes_viewport_index) {
- struct si_signed_scissor *vp = &sctx->viewports.as_scissor[0];
-
- if (!(mask & 1))
- return;
-
- radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
- si_emit_one_scissor(cs, vp, scissor_enable ? &states[0] : NULL);
- si_emit_guardband(sctx, vp);
- sctx->scissors.dirty_mask &= ~1; /* clear one bit */
- return;
- }
-
- /* Shaders can draw to any viewport. Make a union of all viewports. */
- max_vp_scissor = sctx->viewports.as_scissor[0];
- for (i = 1; i < SI_MAX_VIEWPORTS; i++)
- si_scissor_make_union(&max_vp_scissor,
- &sctx->viewports.as_scissor[i]);
-
- while (mask) {
- int start, count, i;
-
- u_bit_scan_consecutive_range(&mask, &start, &count);
-
- radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL +
- start * 4 * 2, count * 2);
- for (i = start; i < start+count; i++) {
- si_emit_one_scissor(cs, &sctx->viewports.as_scissor[i],
- scissor_enable ? &states[i] : NULL);
- }
- }
- si_emit_guardband(sctx, &max_vp_scissor);
- sctx->scissors.dirty_mask = 0;
-}
-
-static void si_set_viewport_states(struct pipe_context *ctx,
- unsigned start_slot,
- unsigned num_viewports,
- const struct pipe_viewport_state *state)
-{
- struct si_context *sctx = (struct si_context *)ctx;
- int i;
-
- for (i = 0; i < num_viewports; i++) {
- unsigned index = start_slot + i;
-
- sctx->viewports.states[index] = state[i];
- si_get_scissor_from_viewport(&state[i],
- &sctx->viewports.as_scissor[index]);
- }
-
- sctx->viewports.dirty_mask |= ((1 << num_viewports) - 1) << start_slot;
- sctx->scissors.dirty_mask |= ((1 << num_viewports) - 1) << start_slot;
- si_mark_atom_dirty(sctx, &sctx->viewports.atom);
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
-}
-
-static void si_emit_viewports(struct si_context *sctx, struct r600_atom *atom)
-{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
- struct pipe_viewport_state *states = sctx->viewports.states;
- unsigned mask = sctx->viewports.dirty_mask;
-
- /* The simple case: Only 1 viewport is active. */
- if (!si_get_vs_info(sctx)->writes_viewport_index) {
- if (!(mask & 1))
- return;
-
- radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
- radeon_emit(cs, fui(states[0].scale[0]));
- radeon_emit(cs, fui(states[0].translate[0]));
- radeon_emit(cs, fui(states[0].scale[1]));
- radeon_emit(cs, fui(states[0].translate[1]));
- radeon_emit(cs, fui(states[0].scale[2]));
- radeon_emit(cs, fui(states[0].translate[2]));
- sctx->viewports.dirty_mask &= ~1; /* clear one bit */
- return;
- }
-
- while (mask) {
- int start, count, i;
-
- u_bit_scan_consecutive_range(&mask, &start, &count);
-
- radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
- start * 4 * 6, count * 6);
- for (i = start; i < start+count; i++) {
- radeon_emit(cs, fui(states[i].scale[0]));
- radeon_emit(cs, fui(states[i].translate[0]));
- radeon_emit(cs, fui(states[i].scale[1]));
- radeon_emit(cs, fui(states[i].translate[1]));
- radeon_emit(cs, fui(states[i].scale[2]));
- radeon_emit(cs, fui(states[i].translate[2]));
- }
- }
- sctx->viewports.dirty_mask = 0;
-}
-
/*
* inferred state between framebuffer and rasterizer
*/
(!old_rs || old_rs->multisample_enable != rs->multisample_enable))
si_mark_atom_dirty(sctx, &sctx->db_render_state);
- if (!old_rs || old_rs->scissor_enable != rs->scissor_enable) {
- sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
- }
+ r600_set_scissor_enable(&sctx->b, rs->scissor_enable);
si_pm4_bind_state(sctx, rasterizer, rs);
si_update_poly_offset_state(sctx);
si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
+ si_init_external_atom(sctx, &sctx->b.scissors.atom, &sctx->atoms.s.scissors);
+ si_init_external_atom(sctx, &sctx->b.viewports.atom, &sctx->atoms.s.viewports);
si_init_atom(sctx, &sctx->cache_flush, &sctx->atoms.s.cache_flush, si_emit_cache_flush);
si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
si_init_atom(sctx, &sctx->blend_color.atom, &sctx->atoms.s.blend_color, si_emit_blend_color);
si_init_atom(sctx, &sctx->clip_regs, &sctx->atoms.s.clip_regs, si_emit_clip_regs);
si_init_atom(sctx, &sctx->clip_state.atom, &sctx->atoms.s.clip_state, si_emit_clip_state);
- si_init_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors, si_emit_scissors);
- si_init_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports, si_emit_viewports);
si_init_atom(sctx, &sctx->stencil_ref.atom, &sctx->atoms.s.stencil_ref, si_emit_stencil_ref);
sctx->b.b.create_blend_state = si_create_blend_state;
sctx->custom_blend_dcc_decompress = si_create_blend_custom(sctx, V_028808_CB_DCC_DECOMPRESS);
sctx->b.b.set_clip_state = si_set_clip_state;
- sctx->b.b.set_scissor_states = si_set_scissor_states;
- sctx->b.b.set_viewport_states = si_set_viewport_states;
sctx->b.b.set_stencil_ref = si_set_stencil_ref;
sctx->b.b.set_framebuffer_state = si_set_framebuffer_state;
return NULL;
}
-/**
- * Normally, we only emit 1 viewport and 1 scissor if no shader is using
- * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
- * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
- * called to emit the rest.
- */
-static void si_update_viewports_and_scissors(struct si_context *sctx)
-{
- struct tgsi_shader_info *info = si_get_vs_info(sctx);
-
- if (!info || !info->writes_viewport_index)
- return;
-
- if (sctx->scissors.dirty_mask)
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
- if (sctx->viewports.dirty_mask)
- si_mark_atom_dirty(sctx, &sctx->viewports.atom);
-}
-
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
sctx->vs_shader.cso = sel;
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
si_mark_atom_dirty(sctx, &sctx->clip_regs);
- si_update_viewports_and_scissors(sctx);
+ r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
if (enable_changed)
si_shader_change_notify(sctx);
- si_update_viewports_and_scissors(sctx);
+ r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
si_shader_change_notify(sctx);
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
}
- si_update_viewports_and_scissors(sctx);
+ r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)