util_range_add(&r600_resource(pipe_buffer)->valid_buffer_range,
0, pipe_buffer->width0);
- surf->cb_color_cmask = surf->cb_color_base;
- surf->cb_color_cmask_slice = 0;
surf->cb_color_fmask = surf->cb_color_base;
surf->cb_color_fmask_slice = 0;
}
if (rtex->fmask.size) {
color_info |= S_028C70_COMPRESSION(1);
}
- if (rtex->cmask.size) {
- color_info |= S_028C70_FAST_CLEAR(1);
- }
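/* FAST_CLEAR is no longer tracked per-surface; it is set in
 * rtex->cb_color_info when the CMASK is allocated (see the texture code
 * below). */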
base_offset = r600_resource_va(rctx->b.b.screen, pipe_tex);
} else {
surf->cb_color_fmask = surf->cb_color_base;
}
- if (rtex->cmask.size) {
- uint64_t va = r600_resource_va(rctx->b.b.screen, &rtex->cmask_buffer->b.b);
- surf->cb_color_cmask = (va + rtex->cmask.offset) >> 8;
- } else {
- surf->cb_color_cmask = surf->cb_color_base;
- }
surf->cb_color_fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
- surf->cb_color_cmask_slice = S_028C80_TILE_MAX(rtex->cmask.slice_tile_max);
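/* The CMASK base and slice values now come from rtex->cmask at emit time,
 * so only the FMASK copy is kept in the surface. */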
surf->color_initialized = true;
}
struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
unsigned nr_cbufs = state->nr_cbufs;
unsigned i, tl, br;
+ struct r600_texture *tex = NULL;
+ struct r600_surface *cb = NULL;
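+/* Hoisted out of the loop so the dual-src blending code after it can
+ * reuse the last bound colorbuffer (cbufs[0] when nr_cbufs == 1). */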
/* XXX support more colorbuffers once we need them */
assert(nr_cbufs <= 8);
/* Colorbuffers. */
for (i = 0; i < nr_cbufs; i++) {
- struct r600_surface *cb = (struct r600_surface*)state->cbufs[i];
- struct r600_texture *tex;
unsigned reloc, cmask_reloc;
+ cb = (struct r600_surface*)state->cbufs[i];
if (!cb) {
r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
continue;
}

tex = (struct r600_texture *)cb->base.texture;
radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
- radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
+ radeon_emit(cs, cb->cb_color_info | tex->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
- radeon_emit(cs, cb->cb_color_cmask); /* R_028C7C_CB_COLOR0_CMASK */
- radeon_emit(cs, cb->cb_color_cmask_slice); /* R_028C80_CB_COLOR0_CMASK_SLICE */
+ radeon_emit(cs, tex->cmask.base_address_reg); /* R_028C7C_CB_COLOR0_CMASK */
+ radeon_emit(cs, tex->cmask.slice_tile_max); /* R_028C80_CB_COLOR0_CMASK_SLICE */
radeon_emit(cs, cb->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */
radeon_emit(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
radeon_emit(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
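/* The CMASK registers above come straight from the texture:
 * base_address_reg is the CMASK GPU address >> 8 (the register takes a
 * 256-byte-aligned address), computed once when the CMASK is allocated. */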
/* set CB_COLOR1_INFO for possible dual-src blending */
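/* After the loop, cb and tex still refer to cbufs[0], since i == 1 here
 * implies the loop ran exactly once. */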
if (i == 1 && state->cbufs[0]) {
r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
- ((struct r600_surface*)state->cbufs[0])->cb_color_info);
+ cb->cb_color_info | tex->cb_color_info);
if (!rctx->keep_tiling_flags) {
unsigned reloc = r600_context_bo_reloc(&rctx->b,
memcpy(clear_value, &uc, 2 * sizeof(uint32_t));
}
-static void evergreen_check_alloc_cmask(struct pipe_context *ctx,
- struct pipe_surface *cbuf)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_texture *tex = (struct r600_texture *)cbuf->texture;
- struct r600_surface *surf = (struct r600_surface *)cbuf;
-
- if (tex->cmask_buffer)
- return;
-
- r600_texture_init_cmask(&rctx->screen->b, tex);
-
- /* update colorbuffer state bits */
- if (tex->cmask_buffer != NULL) {
- uint64_t va = r600_resource_va(rctx->b.b.screen, &tex->cmask_buffer->b.b);
- surf->cb_color_cmask = va >> 8;
- surf->cb_color_cmask_slice = S_028C80_TILE_MAX(tex->cmask.slice_tile_max);
- surf->cb_color_info |= S_028C70_FAST_CLEAR(1);
- }
-}
-
static void r600_try_fast_color_clear(struct r600_context *rctx, unsigned *buffers,
const union pipe_color_union *color)
{
}
/* ensure CMASK is enabled */
- evergreen_check_alloc_cmask(&rctx->b.b, fb->cbufs[i]);
+ r600_texture_alloc_cmask_separate(&rctx->screen->b, tex);
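+/* The shared helper replaces the evergreen-only evergreen_check_alloc_cmask()
+ * removed above; it returns early if a CMASK buffer already exists. */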
if (tex->cmask.size == 0) {
continue;
}
unsigned size;
unsigned alignment;
unsigned slice_tile_max;
+ unsigned base_address_reg; /* CB_COLORn_CMASK register value: CMASK GPU address >> 8 */
};
struct r600_texture {
struct r600_fmask_info fmask;
struct r600_cmask_info cmask;
struct r600_resource *cmask_buffer;
+ unsigned cb_color_info; /* fast clear enable bit */
unsigned color_clear_value[2];
/* Depth buffer compression and fast clear. */
unsigned cb_color_attrib; /* EG and later */
unsigned cb_color_fmask; /* CB_COLORn_FMASK (EG and later) or CB_COLORn_FRAG (r600) */
unsigned cb_color_fmask_slice; /* EG and later */
- unsigned cb_color_cmask; /* CB_COLORn_CMASK (EG and later) or CB_COLORn_TILE (r600) */
- unsigned cb_color_cmask_slice; /* EG and later */
+ unsigned cb_color_cmask; /* CB_COLORn_TILE (r600 only) */
unsigned cb_color_mask; /* R600 only */
struct r600_resource *cb_buffer_fmask; /* Used for FMASK relocations. R600 only */
struct r600_resource *cb_buffer_cmask; /* Used for CMASK relocations. R600 only */
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
struct r600_texture *rtex,
struct r600_cmask_info *out);
-void r600_texture_init_cmask(struct r600_common_screen *rscreen,
- struct r600_texture *rtex);
+void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex);
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
struct pipe_resource *texture,
struct r600_texture **staging);
rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
rtex->size = rtex->cmask.offset + rtex->cmask.size;
+
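+ /* The FAST_CLEAR bit sits at bit 17 on evergreen but bit 13 on SI,
+ * hence the chip_class check. */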
+ if (rscreen->chip_class >= SI)
+ rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
+ else
+ rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
-void r600_texture_init_cmask(struct r600_common_screen *rscreen,
- struct r600_texture *rtex)
+void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
{
+ if (rtex->cmask_buffer)
+ return;
+
assert(rtex->cmask.size == 0);
r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
PIPE_USAGE_DEFAULT, rtex->cmask.size);
if (rtex->cmask_buffer == NULL) {
rtex->cmask.size = 0;
+ return;
}
+
+ /* update colorbuffer state bits */
+ rtex->cmask.base_address_reg =
+ r600_resource_va(&rscreen->b, &rtex->cmask_buffer->b.b) >> 8;
+
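+ /* Same chip-dependent FAST_CLEAR selection as in the inline-CMASK path. */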
+ if (rscreen->chip_class >= SI)
+ rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
+ else
+ rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
static unsigned si_texture_htile_alloc_size(struct r600_common_screen *rscreen,
struct r600_texture *rtex;
struct r600_resource *resource;
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ uint64_t va;
rtex = CALLOC_STRUCT(r600_texture);
if (rtex == NULL)
rtex->cmask.offset, rtex->cmask.size, 0xCCCCCCCC);
}
+ /* Initialize the CMASK base register value. */
+ va = r600_resource_va(&rscreen->b, &rtex->resource.b.b);
+ rtex->cmask.base_address_reg = (va + rtex->cmask.offset) >> 8;
+
if (rscreen->debug_flags & DBG_VM) {
fprintf(stderr, "VM start=0x%"PRIu64" end=0x%"PRIu64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
r600_resource_va(screen, &rtex->resource.b.b),
#define CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x28c18
#define CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x28c28
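+/* FAST_CLEAR duplicated here from the per-generation register headers
+ * (bit 17 on evergreen, bit 13 on SI) so the shared texture code can set
+ * it without chip-specific includes. */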
+#define EG_S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
+#define SI_S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 13)
+
/*CIK+*/
#define R_0300FC_CP_STRMOUT_CNTL 0x0300FC
}
}
- if (rtex->cmask.size) {
- color_info |= S_028C70_FAST_CLEAR(1);
- }
-
offset += r600_resource_va(sctx->b.b.screen, surf->base.texture);
surf->cb_color_base = offset >> 8;
surf->cb_color_info = color_info;
surf->cb_color_attrib = color_attrib;
- if (rtex->cmask.size) {
- surf->cb_color_cmask = (offset + rtex->cmask.offset) >> 8;
- surf->cb_color_cmask_slice = S_028C80_TILE_MAX(rtex->cmask.slice_tile_max);
- }
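/* As on evergreen, FAST_CLEAR and the CMASK registers are now taken from
 * r600_texture rather than stored per-surface. */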
if (rtex->fmask.size) {
surf->cb_color_fmask = (offset + rtex->fmask.offset) >> 8;
surf->cb_color_fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
+ struct r600_texture *tex = NULL;
+ struct r600_surface *cb = NULL;
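+/* Mirrors the evergreen change: cb and tex are hoisted for the dual-src
+ * blending path, and the CMASK registers are read from the texture. */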
/* Colorbuffers. */
for (i = 0; i < nr_cbufs; i++) {
- struct r600_surface *cb = (struct r600_surface*)state->cbufs[i];
- struct r600_texture *tex;
-
+ cb = (struct r600_surface*)state->cbufs[i];
if (!cb) {
r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
continue;
}

tex = (struct r600_texture*)cb->base.texture;
radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
- radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
+ radeon_emit(cs, cb->cb_color_info | tex->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
radeon_emit(cs, 0); /* R_028C78 unused */
- radeon_emit(cs, cb->cb_color_cmask); /* R_028C7C_CB_COLOR0_CMASK */
- radeon_emit(cs, cb->cb_color_cmask_slice); /* R_028C80_CB_COLOR0_CMASK_SLICE */
+ radeon_emit(cs, tex->cmask.base_address_reg); /* R_028C7C_CB_COLOR0_CMASK */
+ radeon_emit(cs, tex->cmask.slice_tile_max); /* R_028C80_CB_COLOR0_CMASK_SLICE */
radeon_emit(cs, cb->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */
radeon_emit(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
radeon_emit(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
/* set CB_COLOR1_INFO for possible dual-src blending */
if (i == 1 && state->cbufs[0]) {
r600_write_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
- ((struct r600_surface*)state->cbufs[0])->cb_color_info);
+ cb->cb_color_info | tex->cb_color_info);
i++;
}
for (; i < 8 ; i++) {