.factor = 0u,
.pattern = 0u,
},
+ .cull_mode = 0u,
+ .front_face = 0u,
+ .primitive_topology = 0u,
};
static void
uint32_t copy_mask = src->mask;
uint32_t dest_mask = 0;
- /* Make sure to copy the number of viewports/scissors because they can
- * only be specified at pipeline creation time.
- */
- dest->viewport.count = src->viewport.count;
- dest->scissor.count = src->scissor.count;
dest->discard_rectangle.count = src->discard_rectangle.count;
dest->sample_location.count = src->sample_location.count;
if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
+ if (dest->viewport.count != src->viewport.count) {
+ dest->viewport.count = src->viewport.count;
+ dest_mask |= RADV_DYNAMIC_VIEWPORT;
+ }
+
if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
src->viewport.count * sizeof(VkViewport))) {
typed_memcpy(dest->viewport.viewports,
}
if (copy_mask & RADV_DYNAMIC_SCISSOR) {
+ if (dest->scissor.count != src->scissor.count) {
+ dest->scissor.count = src->scissor.count;
+ dest_mask |= RADV_DYNAMIC_SCISSOR;
+ }
+
if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
src->scissor.count * sizeof(VkRect2D))) {
typed_memcpy(dest->scissor.scissors,
}
}
+ if (copy_mask & RADV_DYNAMIC_CULL_MODE) {
+ if (dest->cull_mode != src->cull_mode) {
+ dest->cull_mode = src->cull_mode;
+ dest_mask |= RADV_DYNAMIC_CULL_MODE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_FRONT_FACE) {
+ if (dest->front_face != src->front_face) {
+ dest->front_face = src->front_face;
+ dest_mask |= RADV_DYNAMIC_FRONT_FACE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_PRIMITIVE_TOPOLOGY) {
+ if (dest->primitive_topology != src->primitive_topology) {
+ dest->primitive_topology = src->primitive_topology;
+ dest_mask |= RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_DEPTH_TEST_ENABLE) {
+ if (dest->depth_test_enable != src->depth_test_enable) {
+ dest->depth_test_enable = src->depth_test_enable;
+ dest_mask |= RADV_DYNAMIC_DEPTH_TEST_ENABLE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) {
+ if (dest->depth_write_enable != src->depth_write_enable) {
+ dest->depth_write_enable = src->depth_write_enable;
+ dest_mask |= RADV_DYNAMIC_DEPTH_WRITE_ENABLE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_DEPTH_COMPARE_OP) {
+ if (dest->depth_compare_op != src->depth_compare_op) {
+ dest->depth_compare_op = src->depth_compare_op;
+ dest_mask |= RADV_DYNAMIC_DEPTH_COMPARE_OP;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
+ if (dest->depth_bounds_test_enable != src->depth_bounds_test_enable) {
+ dest->depth_bounds_test_enable = src->depth_bounds_test_enable;
+ dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_STENCIL_TEST_ENABLE) {
+ if (dest->stencil_test_enable != src->stencil_test_enable) {
+ dest->stencil_test_enable = src->stencil_test_enable;
+ dest_mask |= RADV_DYNAMIC_STENCIL_TEST_ENABLE;
+ }
+ }
+
+ if (copy_mask & RADV_DYNAMIC_STENCIL_OP) {
+ if (memcmp(&dest->stencil_op, &src->stencil_op,
+ sizeof(src->stencil_op))) {
+ dest->stencil_op = src->stencil_op;
+ dest_mask |= RADV_DYNAMIC_STENCIL_OP;
+ }
+ }
+
cmd_buffer->state.dirty |= dest_mask;
}
}
}
+/* Destroy a command buffer and release everything it owns: unlink it
+ * from its pool, free all upload BOs and the CS, free per-bind-point
+ * push-descriptor storage, then free the command buffer itself.
+ * NOTE(review): appears safe to call on a partially created command
+ * buffer (cs may be NULL on the creation error path) — confirm that
+ * pool_link is always initialized before this runs.
+ */
+static void
+radv_destroy_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
+{
+	list_del(&cmd_buffer->pool_link);
+
+	/* Release every chained upload BO, not just the current one. */
+	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
+				 &cmd_buffer->upload.list, list) {
+		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
+		list_del(&up->list);
+		free(up);
+	}
+
+	if (cmd_buffer->upload.upload_bo)
+		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
+
+	/* cs can be NULL when cs_create() failed during creation. */
+	if (cmd_buffer->cs)
+		cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
+
+	for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
+		free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+
+	vk_object_base_finish(&cmd_buffer->base);
+	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
static VkResult radv_create_cmd_buffer(
struct radv_device * device,
struct radv_cmd_pool * pool,
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
- if (pool) {
- list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- cmd_buffer->queue_family_index = pool->queue_family_index;
-
- } else {
- /* Init the pool_link so we can safely call list_del when we destroy
- * the command buffer
- */
- list_inithead(&cmd_buffer->pool_link);
- cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
- }
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ cmd_buffer->queue_family_index = pool->queue_family_index;
ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ radv_destroy_cmd_buffer(cmd_buffer);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
return VK_SUCCESS;
}
-static void
-radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
-{
- list_del(&cmd_buffer->pool_link);
-
- list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
- &cmd_buffer->upload.list, list) {
- cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
- list_del(&up->list);
- free(up);
- }
-
- if (cmd_buffer->upload.upload_bo)
- cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
- cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
-
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
- free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
-
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
-}
-
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
float shifted_pos_x = user_locs[i].x - 0.5;
float shifted_pos_y = user_locs[i].y - 0.5;
- int32_t scaled_pos_x = floor(shifted_pos_x * 16);
- int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+ int32_t scaled_pos_x = floorf(shifted_pos_x * 16);
+ int32_t scaled_pos_y = floorf(shifted_pos_y * 16);
sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
static void
radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_sample_locations_state *sample_location =
&cmd_buffer->state.dynamic.sample_location;
uint32_t num_samples = (uint32_t)sample_location->per_pixel;
num_samples);
/* Compute the maximum sample distance from the specified locations. */
- for (uint32_t i = 0; i < num_samples; i++) {
- VkOffset2D offset = sample_locs[0][i];
- max_sample_dist = MAX2(max_sample_dist,
- MAX2(abs(offset.x), abs(offset.y)));
+ for (unsigned i = 0; i < 4; ++i) {
+ for (uint32_t j = 0; j < num_samples; j++) {
+ VkOffset2D offset = sample_locs[i][j];
+ max_sample_dist = MAX2(max_sample_dist,
+ MAX2(abs(offset.x), abs(offset.y)));
+ }
}
/* Emit the specified user sample locations. */
}
/* Emit the maximum sample distance and the centroid priority. */
- uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
-
- pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
- pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
-
- radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
- radeon_emit(cs, pa_sc_aa_config);
+ radeon_set_context_reg_rmw(cs, R_028BE0_PA_SC_AA_CONFIG,
+ S_028BE0_MAX_SAMPLE_DIST(max_sample_dist),
+ ~C_028BE0_MAX_SAMPLE_DIST);
radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
radeon_emit(cs, centroid_priority);
pipeline->graphics.can_use_guardband)
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
+ if (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->graphics.pa_su_sc_mode_cntl !=
+ pipeline->graphics.pa_su_sc_mode_cntl)
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_CULL_MODE |
+ RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
+
+ if (!cmd_buffer->state.emitted_pipeline)
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
+
+ if (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->graphics.db_depth_control !=
+ pipeline->graphics.db_depth_control)
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
+
+ if (!cmd_buffer->state.emitted_pipeline)
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
+
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
if (!cmd_buffer->state.emitted_pipeline ||
unsigned width = cmd_buffer->state.dynamic.line_width * 8;
radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
- S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
+ S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF)));
}
static void
radv_emit_line_stipple(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
uint32_t auto_reset_cntl = 1;
- if (pipeline->graphics.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)
+ if (d->primitive_topology == V_008958_DI_PT_LINESTRIP)
auto_reset_cntl = 2;
radeon_set_context_reg(cmd_buffer->cs, R_028A0C_PA_SC_LINE_STIPPLE,
S_028A0C_AUTO_RESET_CNTL(auto_reset_cntl));
}
+/* Emit PA_SU_SC_MODE_CNTL, overlaying the dynamic cull mode and/or
+ * front face (selected by the dirty bits in "states") on top of the
+ * value baked into the currently bound pipeline.
+ */
+static void
+radv_emit_culling(struct radv_cmd_buffer *cmd_buffer, uint32_t states)
+{
+	unsigned pa_su_sc_mode_cntl = cmd_buffer->state.pipeline->graphics.pa_su_sc_mode_cntl;
+	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_CULL_MODE) {
+		/* Clear then set each field so pipeline defaults are replaced,
+		 * not OR'd with, the dynamic value.
+		 */
+		pa_su_sc_mode_cntl &= C_028814_CULL_FRONT;
+		pa_su_sc_mode_cntl |= S_028814_CULL_FRONT(!!(d->cull_mode & VK_CULL_MODE_FRONT_BIT));
+
+		pa_su_sc_mode_cntl &= C_028814_CULL_BACK;
+		pa_su_sc_mode_cntl |= S_028814_CULL_BACK(!!(d->cull_mode & VK_CULL_MODE_BACK_BIT));
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE) {
+		pa_su_sc_mode_cntl &= C_028814_FACE;
+		pa_su_sc_mode_cntl |= S_028814_FACE(d->front_face);
+	}
+
+	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
+			       pa_su_sc_mode_cntl);
+}
+
+/* Emit the dynamic primitive topology. GFX7+ programs the uconfig
+ * register via an indexed write; older chips use the config register.
+ */
+static void
+radv_emit_primitive_topology(struct radv_cmd_buffer *cmd_buffer)
+{
+	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
+		radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
+					   cmd_buffer->cs,
+					   R_030908_VGT_PRIMITIVE_TYPE, 1,
+					   d->primitive_topology);
+	} else {
+		radeon_set_config_reg(cmd_buffer->cs,
+				      R_008958_VGT_PRIMITIVE_TYPE,
+				      d->primitive_topology);
+	}
+}
+
+/* Emit DB_DEPTH_CONTROL, overriding the pipeline-baked value with any
+ * dynamic depth/stencil state whose dirty bit is set in "states".
+ * Fields not selected by "states" keep the pipeline's values.
+ */
+static void
+radv_emit_depth_control(struct radv_cmd_buffer *cmd_buffer, uint32_t states)
+{
+	unsigned db_depth_control = cmd_buffer->state.pipeline->graphics.db_depth_control;
+	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE) {
+		db_depth_control &= C_028800_Z_ENABLE;
+		db_depth_control |= S_028800_Z_ENABLE(d->depth_test_enable ? 1 : 0);
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE) {
+		db_depth_control &= C_028800_Z_WRITE_ENABLE;
+		db_depth_control |= S_028800_Z_WRITE_ENABLE(d->depth_write_enable ? 1 : 0);
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP) {
+		db_depth_control &= C_028800_ZFUNC;
+		db_depth_control |= S_028800_ZFUNC(d->depth_compare_op);
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
+		db_depth_control &= C_028800_DEPTH_BOUNDS_ENABLE;
+		db_depth_control |= S_028800_DEPTH_BOUNDS_ENABLE(d->depth_bounds_test_enable ? 1 : 0);
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE) {
+		db_depth_control &= C_028800_STENCIL_ENABLE;
+		db_depth_control |= S_028800_STENCIL_ENABLE(d->stencil_test_enable ? 1 : 0);
+
+		/* BACKFACE_ENABLE follows the same dynamic toggle as the
+		 * front-face stencil test.
+		 */
+		db_depth_control &= C_028800_BACKFACE_ENABLE;
+		db_depth_control |= S_028800_BACKFACE_ENABLE(d->stencil_test_enable ? 1 : 0);
+	}
+
+	if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP) {
+		/* Only the compare ops live in DB_DEPTH_CONTROL; the
+		 * fail/pass/zfail ops are emitted in DB_STENCIL_CONTROL.
+		 */
+		db_depth_control &= C_028800_STENCILFUNC;
+		db_depth_control |= S_028800_STENCILFUNC(d->stencil_op.front.compare_op);
+
+		db_depth_control &= C_028800_STENCILFUNC_BF;
+		db_depth_control |= S_028800_STENCILFUNC_BF(d->stencil_op.back.compare_op);
+	}
+
+	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL,
+			       db_depth_control);
+}
+
+/* Emit DB_STENCIL_CONTROL with the dynamic front/back stencil
+ * fail/pass/depth-fail ops (compare ops go in DB_DEPTH_CONTROL).
+ */
+static void
+radv_emit_stencil_control(struct radv_cmd_buffer *cmd_buffer)
+{
+	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL,
+			       S_02842C_STENCILFAIL(si_translate_stencil_op(d->stencil_op.front.fail_op)) |
+			       S_02842C_STENCILZPASS(si_translate_stencil_op(d->stencil_op.front.pass_op)) |
+			       S_02842C_STENCILZFAIL(si_translate_stencil_op(d->stencil_op.front.depth_fail_op)) |
+			       S_02842C_STENCILFAIL_BF(si_translate_stencil_op(d->stencil_op.back.fail_op)) |
+			       S_02842C_STENCILZPASS_BF(si_translate_stencil_op(d->stencil_op.back.pass_op)) |
+			       S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(d->stencil_op.back.depth_fail_op)));
+}
+
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
cb_color_info &= C_028C70_DCC_ENABLE;
}
+ if (!radv_layout_can_fast_clear(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ cb_color_info &= C_028C70_COMPRESSION;
+ }
+
if (radv_image_is_tc_compat_cmask(image) &&
(radv_is_fmask_decompress_pipeline(cmd_buffer) ||
radv_is_dcc_decompress_pipeline(cmd_buffer))) {
cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
}
+ if (radv_image_has_fmask(image) &&
+ (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
+ radv_is_hw_resolve_pipeline(cmd_buffer))) {
+ /* Make sure FMASK is enabled if it has been cleared because:
+ *
+ * 1) it's required for FMASK_DECOMPRESS operations to avoid
+ * GPU hangs
+ * 2) it's necessary for CB_RESOLVE which can read compressed
+ * FMASK data anyways.
+ */
+ cb_color_info |= S_028C70_COMPRESSION(1);
+ }
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries;
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
+ /* Always enable PERFECT_ZPASS_COUNTS due to issues with partially
+ * covered tiles, discards, and early depth testing. For more details,
+ * see https://gitlab.freedesktop.org/mesa/mesa/-/issues/3218 */
db_count_control =
- S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
+ S_028004_PERFECT_ZPASS_COUNTS(1) |
S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) |
S_028004_SAMPLE_RATE(sample_rate) |
S_028004_ZPASS_ENABLE(1) |
if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
radv_emit_line_stipple(cmd_buffer);
+ if (states & (RADV_CMD_DIRTY_DYNAMIC_CULL_MODE |
+ RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE))
+ radv_emit_culling(cmd_buffer, states);
+
+ if (states & RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY)
+ radv_emit_primitive_topology(cmd_buffer);
+
+ if (states & (RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP |
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE |
+ RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP))
+ radv_emit_depth_control(cmd_buffer, states);
+
+ if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP)
+ radv_emit_stencil_control(cmd_buffer);
+
cmd_buffer->state.dirty &= ~states;
}
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
uint32_t offset;
struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
- uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
unsigned num_records;
+ unsigned stride;
if (!buffer)
continue;
offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
- num_records = buffer->size - offset;
+ if (cmd_buffer->vertex_bindings[i].size) {
+ num_records = cmd_buffer->vertex_bindings[i].size;
+ } else {
+ num_records = buffer->size - offset;
+ }
+
+ if (cmd_buffer->state.pipeline->graphics.uses_dynamic_stride) {
+ stride = cmd_buffer->vertex_bindings[i].stride;
+ } else {
+ stride = cmd_buffer->state.pipeline->binding_stride[i];
+ }
+
if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
num_records /= stride;
- desc[0] = va;
- desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- desc[2] = num_records;
- desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+ uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
/* OOB_SELECT chooses the out-of-bounds check:
*/
int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW;
- desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
- S_008F0C_OOB_SELECT(oob_select) |
- S_008F0C_RESOURCE_LEVEL(1);
+ rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
+ S_008F0C_OOB_SELECT(oob_select) |
+ S_008F0C_RESOURCE_LEVEL(1);
} else {
- desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ rsrc_word3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
}
+
+ desc[0] = va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
+ desc[2] = num_records;
+ desc[3] = rsrc_word3;
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
if (cmd_buffer->device->physical_device->use_ngg_streamout)
size = buffer->size - sb[i].offset;
- desc[0] = va;
- desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
- desc[2] = size;
- desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+ uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
- desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
- S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
- S_008F0C_RESOURCE_LEVEL(1);
+ rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
+ S_008F0C_RESOURCE_LEVEL(1);
} else {
- desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ rsrc_word3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
}
+
+ desc[0] = va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
+ desc[2] = size;
+ desc[3] = rsrc_word3;
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
+ unsigned topology = state->dynamic.primitive_topology;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned ia_multi_vgt_param;
si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
indirect_draw,
count_from_stream_output,
- draw_vertex_count);
+ draw_vertex_count,
+ topology);
if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
if (info->chip_class == GFX9) {
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_INV_L2;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ break;
+ case VK_ACCESS_MEMORY_WRITE_BIT:
+ flush_bits |= RADV_CMD_FLAG_INV_L2 |
+ RADV_CMD_FLAG_WB_L2 |
+ RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+
if (flush_CB_meta)
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
if (flush_DB_meta)
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
/* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
* invalidate the scalar cache. */
- if (cmd_buffer->device->physical_device->use_aco &&
- cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
+ if (!cmd_buffer->device->physical_device->use_llvm)
flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
if (flush_DB_meta)
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
+ case VK_ACCESS_MEMORY_READ_BIT:
+ flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
+ RADV_CMD_FLAG_INV_SCACHE |
+ RADV_CMD_FLAG_INV_L2;
+ if (flush_CB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_DB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ break;
default:
break;
}
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
} else
- radv_cmd_buffer_destroy(cmd_buffer);
+ radv_destroy_cmd_buffer(cmd_buffer);
}
}
}
+/* Core vkCmdBindVertexBuffers: forwarded to the EXT variant with
+ * NULL pSizes/pStrides (i.e. whole-buffer range, pipeline stride).
+ */
void radv_CmdBindVertexBuffers(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    firstBinding,
+    uint32_t                                    bindingCount,
+    const VkBuffer*                             pBuffers,
+    const VkDeviceSize*                         pOffsets)
+{
+	radv_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding,
+				      bindingCount, pBuffers, pOffsets,
+				      NULL, NULL);
+}
+
+void radv_CmdBindVertexBuffers2EXT(
VkCommandBuffer commandBuffer,
uint32_t firstBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkDeviceSize* pOffsets)
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes,
+ const VkDeviceSize* pStrides)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
uint32_t idx = firstBinding + i;
+ VkDeviceSize size = pSizes ? pSizes[i] : 0;
+ VkDeviceSize stride = pStrides ? pStrides[i] : 0;
+ /* pSizes and pStrides are optional. */
if (!changed &&
- (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
- vb[idx].offset != pOffsets[i])) {
+ (vb[idx].buffer != buffer ||
+ vb[idx].offset != pOffsets[i] ||
+ vb[idx].size != size ||
+ vb[idx].stride != stride)) {
changed = true;
}
- vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ vb[idx].buffer = buffer;
vb[idx].offset = pOffsets[i];
+ vb[idx].size = size;
+ vb[idx].stride = stride;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo);
+ if (buffer) {
+ radv_cs_add_buffer(cmd_buffer->device->ws,
+ cmd_buffer->cs, vb[idx].buffer->bo);
+ }
}
if (!changed) {
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
if (!cmd_buffer->device->use_global_bo_list) {
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
+ for (unsigned j = 0; j < set->buffer_count; ++j)
if (set->descriptors[j])
radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
+ if (result != VK_SUCCESS)
+ return vk_error(cmd_buffer->device->instance, result);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
/* Prefetch all pipeline shaders at first draw time. */
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
- if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 &&
cmd_buffer->state.emitted_pipeline &&
radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
!radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
- if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
+ if (total_count <= state->dynamic.viewport.count &&
+ !memcmp(state->dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports))) {
return;
}
+ if (state->dynamic.viewport.count < total_count)
+ state->dynamic.viewport.count = total_count;
+
memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
viewportCount * sizeof(*pViewports));
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
- if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
+ if (total_count <= state->dynamic.scissor.count &&
+ !memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors,
scissorCount * sizeof(*pScissors))) {
return;
}
+ if (state->dynamic.scissor.count < total_count)
+ state->dynamic.scissor.count = total_count;
+
memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
scissorCount * sizeof(*pScissors));
state->dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
}
+/* vkCmdSetCullModeEXT: record the dynamic cull mode; skip the dirty
+ * bit when the value is unchanged to avoid redundant re-emits.
+ */
+void radv_CmdSetCullModeEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkCullModeFlags                             cullMode)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.cull_mode == cullMode)
+		return;
+
+	state->dynamic.cull_mode = cullMode;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_CULL_MODE;
+}
+
+/* vkCmdSetFrontFaceEXT: record the dynamic front-face winding;
+ * no-op when unchanged.
+ */
+void radv_CmdSetFrontFaceEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkFrontFace                                 frontFace)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.front_face == frontFace)
+		return;
+
+	state->dynamic.front_face = frontFace;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
+}
+
+/* vkCmdSetPrimitiveTopologyEXT: translate the Vulkan topology to the
+ * hardware DI_PT_* encoding before storing/comparing, so dynamic state
+ * is kept in hardware terms; no-op when unchanged.
+ */
+void radv_CmdSetPrimitiveTopologyEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkPrimitiveTopology                         primitiveTopology)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	unsigned primitive_topology = si_translate_prim(primitiveTopology);
+
+	if (state->dynamic.primitive_topology == primitive_topology)
+		return;
+
+	state->dynamic.primitive_topology = primitive_topology;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
+}
+
+/* vkCmdSetViewportWithCountEXT: same as vkCmdSetViewport starting at
+ * viewport 0; the count update itself is handled in radv_CmdSetViewport.
+ */
+void radv_CmdSetViewportWithCountEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    viewportCount,
+    const VkViewport*                           pViewports)
+{
+	radv_CmdSetViewport(commandBuffer, 0, viewportCount, pViewports);
+}
+
+/* vkCmdSetScissorWithCountEXT: same as vkCmdSetScissor starting at
+ * scissor 0; the count update itself is handled in radv_CmdSetScissor.
+ */
+void radv_CmdSetScissorWithCountEXT(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    scissorCount,
+    const VkRect2D*                             pScissors)
+{
+	radv_CmdSetScissor(commandBuffer, 0, scissorCount, pScissors);
+}
+
+/* vkCmdSetDepthTestEnableEXT: record the dynamic depth-test toggle;
+ * no-op when unchanged.
+ */
+void radv_CmdSetDepthTestEnableEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkBool32                                    depthTestEnable)
+
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.depth_test_enable == depthTestEnable)
+		return;
+
+	state->dynamic.depth_test_enable = depthTestEnable;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE;
+}
+
+/* vkCmdSetDepthWriteEnableEXT: record the dynamic depth-write toggle;
+ * no-op when unchanged.
+ */
+void radv_CmdSetDepthWriteEnableEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkBool32                                    depthWriteEnable)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.depth_write_enable == depthWriteEnable)
+		return;
+
+	state->dynamic.depth_write_enable = depthWriteEnable;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE;
+}
+
+/* vkCmdSetDepthCompareOpEXT: record the dynamic depth compare op;
+ * no-op when unchanged.
+ */
+void radv_CmdSetDepthCompareOpEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkCompareOp                                 depthCompareOp)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.depth_compare_op == depthCompareOp)
+		return;
+
+	state->dynamic.depth_compare_op = depthCompareOp;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP;
+}
+
+/* vkCmdSetDepthBoundsTestEnableEXT: record the dynamic depth-bounds
+ * test toggle; no-op when unchanged.
+ */
+void radv_CmdSetDepthBoundsTestEnableEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkBool32                                    depthBoundsTestEnable)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.depth_bounds_test_enable == depthBoundsTestEnable)
+		return;
+
+	state->dynamic.depth_bounds_test_enable = depthBoundsTestEnable;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
+}
+
+/* vkCmdSetStencilTestEnableEXT: record the dynamic stencil-test
+ * toggle; no-op when unchanged.
+ */
+void radv_CmdSetStencilTestEnableEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkBool32                                    stencilTestEnable)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+
+	if (state->dynamic.stencil_test_enable == stencilTestEnable)
+		return;
+
+	state->dynamic.stencil_test_enable = stencilTestEnable;
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE;
+}
+
+/* vkCmdSetStencilOpEXT: record dynamic stencil ops for the faces
+ * selected by faceMask. Early-out only when every selected face
+ * already has the requested ops (unselected faces are ignored).
+ */
+void radv_CmdSetStencilOpEXT(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    VkStencilOp                                 failOp,
+    VkStencilOp                                 passOp,
+    VkStencilOp                                 depthFailOp,
+    VkCompareOp                                 compareOp)
+{
+	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+	struct radv_cmd_state *state = &cmd_buffer->state;
+	bool front_same =
+		state->dynamic.stencil_op.front.fail_op == failOp &&
+		state->dynamic.stencil_op.front.pass_op == passOp &&
+		state->dynamic.stencil_op.front.depth_fail_op == depthFailOp &&
+		state->dynamic.stencil_op.front.compare_op == compareOp;
+	bool back_same =
+		state->dynamic.stencil_op.back.fail_op == failOp &&
+		state->dynamic.stencil_op.back.pass_op == passOp &&
+		state->dynamic.stencil_op.back.depth_fail_op == depthFailOp &&
+		state->dynamic.stencil_op.back.compare_op == compareOp;
+
+	/* Skip when each face in faceMask already matches. */
+	if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) &&
+	    (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same))
+		return;
+
+	if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
+		state->dynamic.stencil_op.front.fail_op = failOp;
+		state->dynamic.stencil_op.front.pass_op = passOp;
+		state->dynamic.stencil_op.front.depth_fail_op = depthFailOp;
+		state->dynamic.stencil_op.front.compare_op = compareOp;
+	}
+
+	if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
+		state->dynamic.stencil_op.back.fail_op = failOp;
+		state->dynamic.stencil_op.back.pass_op = passOp;
+		state->dynamic.stencil_op.back.depth_fail_op = depthFailOp;
+		state->dynamic.stencil_op.back.compare_op = compareOp;
+	}
+
+	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
+}
+
void radv_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
- radv_cmd_buffer_destroy(cmd_buffer);
+ radv_destroy_cmd_buffer(cmd_buffer);
}
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
&pool->free_cmd_buffers, pool_link) {
- radv_cmd_buffer_destroy(cmd_buffer);
+ radv_destroy_cmd_buffer(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_ResetCommandPool(
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
&pool->free_cmd_buffers, pool_link) {
- radv_cmd_buffer_destroy(cmd_buffer);
+ radv_destroy_cmd_buffer(cmd_buffer);
}
}
if (size != image->planes[0].surface.dcc_size) {
state->flush_bits |=
radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset + size,
+ image->offset + image->planes[0].surface.dcc_offset + size,
image->planes[0].surface.dcc_size - size,
0xffffffff);
}