#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
-#include "gfx9d.h"
#include "vk_format.h"
+#include "vk_util.h"
#include "radv_debug.h"
#include "radv_meta.h"
VkImageLayout dst_layout,
uint32_t src_family,
uint32_t dst_family,
- const VkImageSubresourceRange *range);
+ const VkImageSubresourceRange *range,
+ struct radv_sample_locations_state *sample_locs);
const struct radv_dynamic_state default_dynamic_state = {
.viewport = {
dest->viewport.count = src->viewport.count;
dest->scissor.count = src->scissor.count;
dest->discard_rectangle.count = src->discard_rectangle.count;
+ dest->sample_location.count = src->sample_location.count;
if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
}
}
+ if (copy_mask & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
+ if (dest->sample_location.per_pixel != src->sample_location.per_pixel ||
+ dest->sample_location.grid_size.width != src->sample_location.grid_size.width ||
+ dest->sample_location.grid_size.height != src->sample_location.grid_size.height ||
+ memcmp(&dest->sample_location.locations,
+ &src->sample_location.locations,
+ src->sample_location.count * sizeof(VkSampleLocationEXT))) {
+ dest->sample_location.per_pixel = src->sample_location.per_pixel;
+ dest->sample_location.grid_size = src->sample_location.grid_size;
+ typed_memcpy(dest->sample_location.locations,
+ src->sample_location.locations,
+ src->sample_location.count);
+ dest_mask |= RADV_DYNAMIC_SAMPLE_LOCATIONS;
+ }
+ }
+
cmd_buffer->state.dirty |= dest_mask;
}
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
}
enum ring_type radv_queue_family_to_ring(int f) {
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
-
cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
cmd_buffer->record_result = VK_SUCCESS;
+ memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
+
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
unsigned fence_offset, eop_bug_offset;
void *fence_ptr;
- radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0, &fence_offset,
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 8, &fence_offset,
&fence_ptr);
+
cmd_buffer->gfx9_fence_va =
radv_buffer_get_va(cmd_buffer->upload.upload_bo);
cmd_buffer->gfx9_fence_va += fence_offset;
- /* Allocate a buffer for the EOP bug on GFX9. */
- radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
- &eop_bug_offset, &fence_ptr);
- cmd_buffer->gfx9_eop_bug_va =
- radv_buffer_get_va(cmd_buffer->upload.upload_bo);
- cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
+ /* Allocate a buffer for the EOP bug on GFX9. */
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 8,
+ &eop_bug_offset, &fence_ptr);
+ cmd_buffer->gfx9_eop_bug_va =
+ radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
+ }
}
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS|
RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_32BIT);
+ RADEON_FLAG_32BIT,
+ RADV_BO_PRIORITY_UPLOAD_BUFFER);
if (!bo) {
cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
unsigned *out_offset,
void **ptr)
{
+ assert(util_is_power_of_two_nonzero(alignment));
+
uint64_t offset = align(cmd_buffer->upload.offset, alignment);
if (offset + size > cmd_buffer->upload.size) {
if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
enum radv_cmd_flush_bits flags)
{
if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
- uint32_t *ptr = NULL;
- uint64_t va = 0;
-
assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
- if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
- va = cmd_buffer->gfx9_fence_va;
- ptr = &cmd_buffer->gfx9_fence_idx;
- }
-
radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
/* Force wait for graphics or compute engines to be idle. */
si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class,
- ptr, va,
+ &cmd_buffer->gfx9_fence_idx,
+ cmd_buffer->gfx9_fence_va,
radv_cmd_buffer_uses_mec(cmd_buffer),
flags, cmd_buffer->gfx9_eop_bug_va);
}
for_each_bit(i, descriptors_state->valid) {
struct radv_descriptor_set *set = descriptors_state->sets[i];
- data[i * 2] = (uintptr_t)set;
- data[i * 2 + 1] = (uintptr_t)set >> 32;
+ data[i * 2] = (uint64_t)(uintptr_t)set;
+ data[i * 2 + 1] = (uint64_t)(uintptr_t)set >> 32;
}
radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
return;
assert(loc->num_sgprs == 1);
- assert(!loc->indirect);
radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
base_reg + loc->sgpr_idx * 4, va, false);
}
}
+/**
+ * Convert the user sample locations to hardware sample locations (the values
+ * that will be emitted by PA_SC_AA_SAMPLE_LOCS_PIXEL_*).
+ */
+static void
+radv_convert_user_sample_locs(struct radv_sample_locations_state *state,
+ uint32_t x, uint32_t y, VkOffset2D *sample_locs)
+{
+ uint32_t x_offset = x % state->grid_size.width;
+ uint32_t y_offset = y % state->grid_size.height;
+ uint32_t num_samples = (uint32_t)state->per_pixel;
+ VkSampleLocationEXT *user_locs;
+ uint32_t pixel_offset;
+
+ pixel_offset = (x_offset + y_offset * state->grid_size.width) * num_samples;
+
+ assert(pixel_offset <= MAX_SAMPLE_LOCATIONS);
+ user_locs = &state->locations[pixel_offset];
+
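+	/* Convert each location from the [0,1] user range to the hardware's
+	 * signed 4-bit fixed-point range [-8,7] (1/16th-pixel units, with
+	 * the origin at the pixel center).
+	 */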
+ for (uint32_t i = 0; i < num_samples; i++) {
+ float shifted_pos_x = user_locs[i].x - 0.5;
+ float shifted_pos_y = user_locs[i].y - 0.5;
+
+ int32_t scaled_pos_x = floor(shifted_pos_x * 16);
+ int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+
+ sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
+ sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
+ }
+}
+
+/**
+ * Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask based on hardware sample
+ * locations.
+ */
+static void
+radv_compute_sample_locs_pixel(uint32_t num_samples, VkOffset2D *sample_locs,
+ uint32_t *sample_locs_pixel)
+{
+ for (uint32_t i = 0; i < num_samples; i++) {
+ uint32_t sample_reg_idx = i / 4;
+ uint32_t sample_loc_idx = i % 4;
+ int32_t pos_x = sample_locs[i].x;
+ int32_t pos_y = sample_locs[i].y;
+
+ uint32_t shift_x = 8 * sample_loc_idx;
+ uint32_t shift_y = shift_x + 4;
+
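+		/* Each sample occupies one byte of the register: X in the
+		 * low nibble, Y in the high nibble, four samples per dword.
+		 */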
+ sample_locs_pixel[sample_reg_idx] |= (pos_x & 0xf) << shift_x;
+ sample_locs_pixel[sample_reg_idx] |= (pos_y & 0xf) << shift_y;
+ }
+}
+
+/**
+ * Compute the PA_SC_CENTROID_PRIORITY_* mask based on the top left hardware
+ * sample locations.
+ */
+static uint64_t
+radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer,
+ VkOffset2D *sample_locs,
+ uint32_t num_samples)
+{
+ uint32_t centroid_priorities[num_samples];
+ uint32_t sample_mask = num_samples - 1;
+ uint32_t distances[num_samples];
+ uint64_t centroid_priority = 0;
+
+ /* Compute the distances from center for each sample. */
+ for (int i = 0; i < num_samples; i++) {
+ distances[i] = (sample_locs[i].x * sample_locs[i].x) +
+ (sample_locs[i].y * sample_locs[i].y);
+ }
+
+ /* Compute the centroid priorities by looking at the distances array. */
+ for (int i = 0; i < num_samples; i++) {
+ uint32_t min_idx = 0;
+
+ for (int j = 1; j < num_samples; j++) {
+ if (distances[j] < distances[min_idx])
+ min_idx = j;
+ }
+
+ centroid_priorities[i] = min_idx;
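+		/* Exclude this sample so the next iteration selects the
+		 * next-closest one.
+		 */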
+ distances[min_idx] = 0xffffffff;
+ }
+
+ /* Compute the final centroid priority. */
+ for (int i = 0; i < 8; i++) {
+ centroid_priority |=
+ centroid_priorities[i & sample_mask] << (i * 4);
+ }
+
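+	/* The same 32-bit pattern is programmed into both halves of the
+	 * PA_SC_CENTROID_PRIORITY_0/_1 register pair.
+	 */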
+ return centroid_priority << 32 | centroid_priority;
+}
+
+/**
+ * Emit the sample locations that are specified with VK_EXT_sample_locations.
+ */
+static void
+radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_multisample_state *ms = &pipeline->graphics.ms;
+ struct radv_sample_locations_state *sample_location =
+ &cmd_buffer->state.dynamic.sample_location;
+ uint32_t num_samples = (uint32_t)sample_location->per_pixel;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t sample_locs_pixel[4][2] = {};
+ VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
+ uint32_t max_sample_dist = 0;
+ uint64_t centroid_priority;
+
+ if (!cmd_buffer->state.dynamic.sample_location.count)
+ return;
+
+	/* Convert the user sample locations to hardware sample locations,
+	 * one set for each pixel of the 2x2 grid (smaller grids repeat via
+	 * the modulo in radv_convert_user_sample_locs()).
+	 */
+ radv_convert_user_sample_locs(sample_location, 0, 0, sample_locs[0]);
+ radv_convert_user_sample_locs(sample_location, 1, 0, sample_locs[1]);
+ radv_convert_user_sample_locs(sample_location, 0, 1, sample_locs[2]);
+ radv_convert_user_sample_locs(sample_location, 1, 1, sample_locs[3]);
+
+ /* Compute the PA_SC_AA_SAMPLE_LOCS_PIXEL_* mask. */
+ for (uint32_t i = 0; i < 4; i++) {
+ radv_compute_sample_locs_pixel(num_samples, sample_locs[i],
+ sample_locs_pixel[i]);
+ }
+
+ /* Compute the PA_SC_CENTROID_PRIORITY_* mask. */
+ centroid_priority =
+ radv_compute_centroid_priority(cmd_buffer, sample_locs[0],
+ num_samples);
+
+ /* Compute the maximum sample distance from the specified locations. */
+ for (uint32_t i = 0; i < num_samples; i++) {
+ VkOffset2D offset = sample_locs[0][i];
+ max_sample_dist = MAX2(max_sample_dist,
+ MAX2(abs(offset.x), abs(offset.y)));
+ }
+
+ /* Emit the specified user sample locations. */
+ switch (num_samples) {
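+	/* One byte per sample: 2x and 4x fit into the *_0 registers, while
+	 * 8x also needs the *_1 registers.
+	 */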
+ case 2:
+ case 4:
+ radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
+ radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
+ radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
+ radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
+ break;
+ case 8:
+ radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_pixel[0][0]);
+ radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_pixel[1][0]);
+ radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_pixel[2][0]);
+ radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_pixel[3][0]);
+ radeon_set_context_reg(cs, R_028BFC_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1, sample_locs_pixel[0][1]);
+ radeon_set_context_reg(cs, R_028C0C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1, sample_locs_pixel[1][1]);
+ radeon_set_context_reg(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1, sample_locs_pixel[2][1]);
+ radeon_set_context_reg(cs, R_028C2C_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1, sample_locs_pixel[3][1]);
+ break;
+ default:
+ unreachable("invalid number of samples");
+ }
+
+ /* Emit the maximum sample distance and the centroid priority. */
+ uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
+
+ pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
+ pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
+
+ radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
+ radeon_emit(cs, pa_sc_aa_config);
+
+ radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+ radeon_emit(cs, centroid_priority);
+ radeon_emit(cs, centroid_priority >> 32);
+
+ /* GFX9: Flush DFSM when the AA mode changes. */
+ if (cmd_buffer->device->dfsm_allowed) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
+ }
+
+ cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
+
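+/**
+ * Emit inline push constants by loading them directly into user SGPRs
+ * instead of going through the upload buffer.
+ */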
+static void
+radv_emit_inline_push_consts(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ gl_shader_stage stage,
+ int idx, int count, uint32_t *values)
+{
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
+ uint32_t base_reg = pipeline->user_data_0[stage];
+ if (loc->sgpr_idx == -1)
+ return;
+
+ assert(loc->num_sgprs == count);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, count);
+ radeon_emit_array(cmd_buffer->cs, values, count);
+}
+
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
- radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);
+ radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
/* GFX9: Flush DFSM when the AA mode changes. */
if (cmd_buffer->device->dfsm_allowed) {
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
+static void
+radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
+{
+ const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
+
+ if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
+ return;
+
+ if (old_pipeline &&
+ old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
+ old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
+ return;
+
+ bool binning_flush = false;
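+	/* These chips need a flush when the binning mode changes; this is
+	 * signalled via FLUSH_ON_BINNING_TRANSITION below.
+	 */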
+ if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ binning_flush = !old_pipeline ||
+ G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
+ G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
+ pipeline->graphics.binning.pa_sc_binner_cntl_0 |
+ S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
+ pipeline->graphics.binning.db_dfsm_control);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
+ pipeline->graphics.binning.db_dfsm_control);
+ }
+
+ cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
+
static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
struct radv_shader_variant *shader)
if (mask & RADV_PREFETCH_GS) {
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_GEOMETRY]);
- radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
+ if (radv_pipeline_has_gs_copy_shader(pipeline))
+ radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
}
if (mask & RADV_PREFETCH_PS)
return;
radv_update_multisample_state(cmd_buffer, pipeline);
+ radv_update_binning_state(cmd_buffer, pipeline);
cmd_buffer->scratch_size_needed =
MAX2(cmd_buffer->scratch_size_needed,
pipeline->shaders[i]->bo);
}
- if (radv_pipeline_has_gs(pipeline))
+ if (radv_pipeline_has_gs_copy_shader(pipeline))
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
pipeline->gs_copy_shader->bo);
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
struct radv_attachment_info *att,
- struct radv_image *image,
+ struct radv_image_view *iview,
VkImageLayout layout)
{
- bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
+ bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
struct radv_color_buffer_info *cb = &att->cb;
uint32_t cb_color_info = cb->cb_color_info;
+ struct radv_image *image = iview->image;
if (!radv_layout_dcc_compressed(image, layout,
radv_image_queue_family_mask(image,
cb_color_info &= C_028C70_DCC_ENABLE;
}
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (radv_image_is_tc_compat_cmask(image) &&
+ (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
+ radv_is_dcc_decompress_pipeline(cmd_buffer))) {
+ /* If this bit is set, the FMASK decompression operation
+ * doesn't occur (DCC_COMPRESS also implies FMASK_DECOMPRESS).
+ */
+ cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
+ }
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
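+		/* On GFX10 the high 32 bits of the color buffer addresses
+		 * are programmed through separate *_BASE_EXT registers.
+		 */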
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
+ radeon_emit(cmd_buffer->cs, cb->cb_color_base);
+ radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cmd_buffer->cs, cb->cb_color_view);
+ radeon_emit(cmd_buffer->cs, cb_color_info);
+ radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
+ radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
+ radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
+ radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
+ radeon_emit(cmd_buffer->cs, 0);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 1);
+ radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028E40_CB_COLOR0_BASE_EXT + index * 4,
+ cb->cb_color_base >> 32);
+ radeon_set_context_reg(cmd_buffer->cs, R_028E60_CB_COLOR0_CMASK_BASE_EXT + index * 4,
+ cb->cb_color_cmask >> 32);
+ radeon_set_context_reg(cmd_buffer->cs, R_028E80_CB_COLOR0_FMASK_BASE_EXT + index * 4,
+ cb->cb_color_fmask >> 32);
+ radeon_set_context_reg(cmd_buffer->cs, R_028EA0_CB_COLOR0_DCC_BASE_EXT + index * 4,
+ cb->cb_dcc_base >> 32);
+ radeon_set_context_reg(cmd_buffer->cs, R_028EC0_CB_COLOR0_ATTRIB2 + index * 4,
+ cb->cb_color_attrib2);
+ radeon_set_context_reg(cmd_buffer->cs, R_028EE0_CB_COLOR0_ATTRIB3 + index * 4,
+ cb->cb_color_attrib3);
+ } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
- S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
+ cb->cb_mrt_epitch);
} else {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
}
}
- if (radv_image_has_dcc(image)) {
+ if (radv_dcc_enabled(image, iview->base_mip)) {
/* Drawing with DCC enabled also compresses colorbuffers. */
- radv_update_dcc_metadata(cmd_buffer, image, true);
+ VkImageSubresourceRange range = {
+ .aspectMask = iview->aspect_mask,
+ .baseMipLevel = iview->base_mip,
+ .levelCount = iview->level_count,
+ .baseArrayLayer = iview->base_layer,
+ .layerCount = iview->layer_count,
+ };
+
+ radv_update_dcc_metadata(cmd_buffer, image, &range, true);
}
}
uint32_t db_z_info = ds->db_z_info;
uint32_t db_z_info_reg;
- if (!radv_image_is_tc_compat_htile(image))
+ if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug ||
+ !radv_image_is_tc_compat_htile(image))
return;
if (!radv_layout_has_htile(image, layout,
db_z_info &= C_028040_ZRANGE_PRECISION;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
db_z_info_reg = R_028038_DB_Z_INFO;
} else {
db_z_info_reg = R_028040_DB_Z_INFO;
radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
-
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
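+		/* On GFX10 the high 32 bits of the Z/stencil/HTILE base
+		 * addresses are programmed through separate *_BASE_HI
+		 * registers.
+		 */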
+ radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);
+ radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 7);
+ radeon_emit(cmd_buffer->cs, S_02803C_RESOURCE_LEVEL(1));
+ radeon_emit(cmd_buffer->cs, db_z_info);
+ radeon_emit(cmd_buffer->cs, db_stencil_info);
+ radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
+ radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
+ radeon_emit(cmd_buffer->cs, ds->db_z_read_base);
+ radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5);
+ radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
+ radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
+ radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32);
+ radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32);
+ radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
+ } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
if (!framebuffer || !subpass)
return;
- att_idx = subpass->depth_stencil_attachment.attachment;
- if (att_idx == VK_ATTACHMENT_UNUSED)
+ if (!subpass->depth_stencil_attachment)
return;
+ att_idx = subpass->depth_stencil_attachment->attachment;
att = &framebuffer->attachments[att_idx];
if (att->attachment->image != image)
return;
*/
if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
ds_clear_value.depth == 0.0) {
- VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+ VkImageLayout layout = subpass->depth_stencil_attachment->layout;
radv_update_zrange_precision(cmd_buffer, &att->ds, image,
layout, false);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
+ if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug)
+ return;
+
va += image->offset + image->tc_compat_zrange_offset;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
struct radv_image *image,
VkClearDepthStencilValue ds_clear_value)
{
- uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->tc_compat_zrange_offset;
uint32_t cond_val;
/* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
*/
void
radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, bool value)
+ struct radv_image *image,
+ const VkImageSubresourceRange *range, bool value)
{
uint64_t pred_val = value;
- uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->fce_pred_offset;
+ uint64_t va = radv_image_get_fce_pred_va(image, range->baseMipLevel);
+ uint32_t level_count = radv_get_levelCount(image, range);
+ uint32_t count = 2 * level_count;
- assert(radv_image_has_dcc(image));
+ assert(radv_dcc_enabled(image, range->baseMipLevel));
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, pred_val);
- radeon_emit(cmd_buffer->cs, pred_val >> 32);
+
+ for (uint32_t l = 0; l < level_count; l++) {
+ radeon_emit(cmd_buffer->cs, pred_val);
+ radeon_emit(cmd_buffer->cs, pred_val >> 32);
+ }
}
/**
*/
void
radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, bool value)
+ struct radv_image *image,
+ const VkImageSubresourceRange *range, bool value)
{
uint64_t pred_val = value;
- uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->dcc_pred_offset;
+ uint64_t va = radv_image_get_dcc_pred_va(image, range->baseMipLevel);
+ uint32_t level_count = radv_get_levelCount(image, range);
+ uint32_t count = 2 * level_count;
- assert(radv_image_has_dcc(image));
+ assert(radv_dcc_enabled(image, range->baseMipLevel));
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, pred_val);
- radeon_emit(cmd_buffer->cs, pred_val >> 32);
+
+ for (uint32_t l = 0; l < level_count; l++) {
+ radeon_emit(cmd_buffer->cs, pred_val);
+ radeon_emit(cmd_buffer->cs, pred_val >> 32);
+ }
}
/**
static void
radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
+ const VkImageSubresourceRange *range,
uint32_t color_values[2])
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(image->bo);
-
- va += image->offset + image->clear_value_offset;
+ uint64_t va = radv_image_get_fast_clear_va(image, range->baseMipLevel);
+ uint32_t level_count = radv_get_levelCount(image, range);
+ uint32_t count = 2 * level_count;
- assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
+ assert(radv_image_has_cmask(image) ||
+ radv_dcc_enabled(image, range->baseMipLevel));
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
- radeon_emit(cs, color_values[0]);
- radeon_emit(cs, color_values[1]);
+
+ for (uint32_t l = 0; l < level_count; l++) {
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+ }
}
/**
*/
void
radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
+ const struct radv_image_view *iview,
int cb_idx,
uint32_t color_values[2])
{
- assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
+ struct radv_image *image = iview->image;
+ VkImageSubresourceRange range = {
+ .aspectMask = iview->aspect_mask,
+ .baseMipLevel = iview->base_mip,
+ .levelCount = iview->level_count,
+ .baseArrayLayer = iview->base_layer,
+ .layerCount = iview->layer_count,
+ };
- radv_set_color_clear_metadata(cmd_buffer, image, color_values);
+ assert(radv_image_has_cmask(image) ||
+ radv_dcc_enabled(image, iview->base_mip));
+
+ radv_set_color_clear_metadata(cmd_buffer, image, &range, color_values);
radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
color_values);
*/
static void
radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
+ struct radv_image_view *iview,
int cb_idx)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(image->bo);
+ struct radv_image *image = iview->image;
+ uint64_t va = radv_image_get_fast_clear_va(image, iview->base_mip);
- va += image->offset + image->clear_value_offset;
-
- if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
+ if (!radv_image_has_cmask(image) &&
+ !radv_dcc_enabled(image, iview->base_mip))
return;
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
radeon_emit(cs, 2);
} else {
- /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
int i;
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
- unsigned num_bpp64_colorbufs = 0;
/* this may happen for inherited secondary recording */
if (!framebuffer)
int idx = subpass->color_attachments[i].attachment;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
- struct radv_image *image = att->attachment->image;
+ struct radv_image_view *iview = att->attachment;
VkImageLayout layout = subpass->color_attachments[i].layout;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
- radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
+ assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
+ VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
+ radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout);
- radv_load_color_clear_metadata(cmd_buffer, image, i);
-
- if (image->surface.bpe >= 8)
- num_bpp64_colorbufs++;
+ radv_load_color_clear_metadata(cmd_buffer, iview, i);
}
- if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
- int idx = subpass->depth_stencil_attachment.attachment;
- VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+ if (subpass->depth_stencil_attachment) {
+ int idx = subpass->depth_stencil_attachment->attachment;
+ VkImageLayout layout = subpass->depth_stencil_attachment->layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+ ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
/* We currently don't support writing decompressed HTILE */
}
radv_load_ds_clear_metadata(cmd_buffer, image);
} else {
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9)
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
else
radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
S_028208_BR_X(framebuffer->width) |
S_028208_BR_Y(framebuffer->height));
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
- uint8_t watermark = 4; /* Default value for VI. */
-
- /* For optimal DCC performance. */
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- if (num_bpp64_colorbufs >= 5) {
- watermark = 8;
- } else {
- watermark = 6;
- }
- }
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
+ bool disable_constant_encode =
+ cmd_buffer->device->physical_device->has_dcc_constant_encode;
+ enum chip_class chip_class =
+ cmd_buffer->device->physical_device->rad_info.chip_class;
+ uint8_t watermark = chip_class >= GFX10 ? 6 : 4;
radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
- S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
- S_028424_OVERWRITE_COMBINER_WATERMARK(watermark));
+ S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(chip_class <= GFX9) |
+ S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) |
+ S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
}
- if (cmd_buffer->device->dfsm_allowed) {
+ if (cmd_buffer->device->pbb_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
if (state->index_type != state->last_index_type) {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
+ radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
+ cs, R_03090C_VGT_INDEX_TYPE,
2, state->index_type);
} else {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
uint32_t db_count_control;
if(!cmd_buffer->state.active_occlusion_queries) {
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
has_perfect_queries) {
} else {
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
+ bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
db_count_control =
S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
+ S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) |
S_028004_SAMPLE_RATE(sample_rate) |
S_028004_ZPASS_ENABLE(1) |
S_028004_SLICE_EVEN_ENABLE(1) |
if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
radv_emit_discard_rectangle(cmd_buffer);
+ if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
+ radv_emit_sample_locations(cmd_buffer);
+
cmd_buffer->state.dirty &= ~states;
}
if (flush_indirect_descriptors)
radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
struct radv_shader_variant *shader, *prev_shader;
+ bool need_push_constants = false;
unsigned offset;
void *ptr;
uint64_t va;
(!layout->push_constant_size && !layout->dynamic_offset_count))
return;
- if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
- 16 * layout->dynamic_offset_count,
- 256, &offset, &ptr))
- return;
+ radv_foreach_stage(stage, stages) {
+ if (!pipeline->shaders[stage])
+ continue;
- memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size,
- descriptors_state->dynamic_buffers,
- 16 * layout->dynamic_offset_count);
+ need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants;
+ need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets;
- va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
- va += offset;
+ uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts;
+ uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+ radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
+ AC_UD_INLINE_PUSH_CONSTANTS,
+ count,
+ (uint32_t *)&cmd_buffer->push_constants[base * 4]);
+ }
- prev_shader = NULL;
- radv_foreach_stage(stage, stages) {
- shader = radv_get_shader(pipeline, stage);
+ if (need_push_constants) {
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
+ 16 * layout->dynamic_offset_count,
+ 256, &offset, &ptr))
+ return;
+
+ memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
+ 16 * layout->dynamic_offset_count);
+
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ va += offset;
+
+ ASSERTED unsigned cdw_max =
+ radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+
+ prev_shader = NULL;
+ radv_foreach_stage(stage, stages) {
+ shader = radv_get_shader(pipeline, stage);
- /* Avoid redundantly emitting the address for merged stages. */
- if (shader && shader != prev_shader) {
- radv_emit_userdata_address(cmd_buffer, pipeline, stage,
- AC_UD_PUSH_CONSTANTS, va);
+ /* Avoid redundantly emitting the address for merged stages. */
+ if (shader && shader != prev_shader) {
+ radv_emit_userdata_address(cmd_buffer, pipeline, stage,
+ AC_UD_PUSH_CONSTANTS, va);
- prev_shader = shader;
+ prev_shader = shader;
+ }
}
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}
cmd_buffer->push_constant_stages &= ~stages;
- assert(cmd_buffer->cs->cdw <= cdw_max);
}
static void
{
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
- cmd_buffer->state.pipeline->vertex_elements.count &&
+ cmd_buffer->state.pipeline->num_vertex_bindings &&
radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
- uint32_t count = velems->count;
+ uint32_t count = cmd_buffer->state.pipeline->num_vertex_bindings;
uint64_t va;
/* allocate some descriptor state for vertex buffers */
for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
uint32_t offset;
- int vb = velems->binding[i];
- struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
- uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
+ struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
+ uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+
+ if (!buffer)
+ continue;
va = radv_buffer_get_va(buffer->bo);
- offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
+ offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
+ if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
else
desc[2] = buffer->size - offset;
- desc[3] = velems->rsrc_word3[i];
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
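+			/* GFX10 buffer descriptors use a unified FORMAT
+			 * field plus OOB_SELECT/RESOURCE_LEVEL; older chips
+			 * encode NUM_FORMAT and DATA_FORMAT separately.
+			 */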
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
+ S_008F0C_OOB_SELECT(1) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ } else {
+ desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ }
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
base_reg + loc->sgpr_idx * 4, va, false);
}
- if (pipeline->gs_copy_shader) {
+ if (radv_pipeline_has_gs_copy_shader(pipeline)) {
loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
if (loc->sgpr_idx != -1) {
base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
/* Set the descriptor.
*
- * On VI, the format must be non-INVALID, otherwise
+ * On GFX8, the format must be non-INVALID, otherwise
* the buffer will be considered not bound and store
* instructions will be no-ops.
*/
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(3) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ } else {
+ desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ }
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
uint64_t strmout_buffer_offset;
};
+static uint32_t
+radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
+{
+ switch (cmd_buffer->state.index_type) {
+ case V_028A7C_VGT_INDEX_8:
+ return 0xffu;
+ case V_028A7C_VGT_INDEX_16:
+ return 0xffffu;
+ case V_028A7C_VGT_INDEX_32:
+ return 0xffffffffu;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
static void
-radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
- const struct radv_draw_info *draw_info)
+si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
+ bool instanced_draw, bool indirect_draw,
+ bool count_from_stream_output,
+ uint32_t draw_vertex_count)
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint32_t ia_multi_vgt_param;
- int32_t primitive_reset_en;
+ unsigned ia_multi_vgt_param;
- /* Draw state. */
ia_multi_vgt_param =
- si_get_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
- draw_info->indirect,
- draw_info->indirect ? 0 : draw_info->count);
+ si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
+ indirect_draw,
+ count_from_stream_output,
+ draw_vertex_count);
if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
- if (info->chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cs,
+ if (info->chip_class == GFX9) {
+ radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device,
+ cs,
R_030960_IA_MULTI_VGT_PARAM,
4, ia_multi_vgt_param);
- } else if (info->chip_class >= CIK) {
+ } else if (info->chip_class >= GFX7) {
radeon_set_context_reg_idx(cs,
R_028AA8_IA_MULTI_VGT_PARAM,
1, ia_multi_vgt_param);
}
state->last_ia_multi_vgt_param = ia_multi_vgt_param;
}
+}
+
+static void
+radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_draw_info *draw_info)
+{
+ struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ int32_t primitive_reset_en;
+
+ /* Draw state. */
+ if (info->chip_class < GFX10) {
+ si_emit_ia_multi_vgt_param(cmd_buffer, draw_info->instance_count > 1,
+ draw_info->indirect,
+ !!draw_info->strmout_buffer,
+ draw_info->indirect ? 0 : draw_info->count);
+ }
/* Primitive restart. */
primitive_reset_en =
if (primitive_reset_en) {
uint32_t primitive_reset_index =
- state->index_type ? 0xffffffffu : 0xffffu;
+ radv_get_primitive_reset_index(cmd_buffer);
if (primitive_reset_index != state->last_primitive_reset_index) {
radeon_set_context_reg(cs,
case VK_ACCESS_SHADER_WRITE_BIT:
case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
- flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+ flush_bits |= RADV_CMD_FLAG_WB_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
case VK_ACCESS_TRANSFER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_INV_GLOBAL_L2;
+ RADV_CMD_FLAG_INV_L2;
if (flush_CB_meta)
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
if (!radv_image_has_htile(image))
flush_DB_meta = false;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ /* TODO: implement shader coherent for GFX10 */
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
if (image->info.samples == 1 &&
(image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
break;
case VK_ACCESS_UNIFORM_READ_BIT:
- flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
+ flush_bits |= RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_SCACHE;
break;
case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
case VK_ACCESS_TRANSFER_READ_BIT:
case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
- flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_GLOBAL_L2;
+ flush_bits |= RADV_CMD_FLAG_INV_VCACHE |
+ RADV_CMD_FLAG_INV_L2;
break;
case VK_ACCESS_SHADER_READ_BIT:
- flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
+ flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
if (!image_is_coherent)
- flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
+ flush_bits |= RADV_CMD_FLAG_INV_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
if (flush_CB)
NULL);
}
+uint32_t
+radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ uint32_t subpass_id = state->subpass - state->pass->subpasses;
+
+ /* The id of this subpass shouldn't exceed the number of subpasses in
+ * this render pass minus 1.
+ */
+ assert(subpass_id < state->pass->subpass_count);
+ return subpass_id;
+}
+
+static struct radv_sample_locations_state *
+radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t att_idx,
+ bool begin_subpass)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+ struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment;
+
+ if (view->image->info.samples == 1)
+ return NULL;
+
+ if (state->pass->attachments[att_idx].first_subpass_idx == subpass_id) {
+ /* Return the initial sample locations if this is the initial
+		 * layout transition of the given subpass attachment.
+ */
+ if (state->attachments[att_idx].sample_location.count > 0)
+ return &state->attachments[att_idx].sample_location;
+ } else {
+ /* Otherwise return the subpass sample locations if defined. */
+ if (state->subpass_sample_locs) {
+ /* Because the driver sets the current subpass before
+ * initial layout transitions, we should use the sample
+ * locations from the previous subpass to avoid an
+ * off-by-one problem. Otherwise, use the sample
+ * locations for the current subpass for final layout
+ * transitions.
+ */
+ if (begin_subpass)
+ subpass_id--;
+
+ for (uint32_t i = 0; i < state->num_subpass_sample_locs; i++) {
+ if (state->subpass_sample_locs[i].subpass_idx == subpass_id)
+ return &state->subpass_sample_locs[i].sample_location;
+ }
+ }
+ }
+
+ return NULL;
+}
+
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
- struct radv_subpass_attachment att)
+ struct radv_subpass_attachment att,
+ bool begin_subpass)
{
unsigned idx = att.attachment;
struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
+ struct radv_sample_locations_state *sample_locs;
VkImageSubresourceRange range;
range.aspectMask = 0;
range.baseMipLevel = view->base_mip;
range.baseArrayLayer = view->base_layer;
range.layerCount = cmd_buffer->state.framebuffer->layers;
- if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+ if (cmd_buffer->state.subpass->view_mask) {
/* If the current subpass uses multiview, the driver might have
* performed a fast color/depth clear to the whole image
* (including all layers). To make sure the driver will
range.layerCount = util_last_bit(cmd_buffer->state.subpass->view_mask);
}
+	/* Get the subpass sample locations for the given attachment; if NULL
+	 * is returned, the driver will use the default HW locations.
+ */
+ sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
+ begin_subpass);
+
radv_handle_image_transition(cmd_buffer,
view->image,
cmd_buffer->state.attachments[idx].current_layout,
- att.layout, 0, 0, &range);
+ att.layout, 0, 0, &range, sample_locs);
cmd_buffer->state.attachments[idx].current_layout = att.layout;
void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
- const struct radv_subpass *subpass, bool transitions)
+ const struct radv_subpass *subpass)
{
- if (transitions) {
- radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
+ cmd_buffer->state.subpass = subpass;
- for (unsigned i = 0; i < subpass->color_count; ++i) {
- if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->color_attachments[i]);
- }
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
+}
- for (unsigned i = 0; i < subpass->input_count; ++i) {
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->input_attachments[i]);
- }
+static VkResult
+radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_render_pass *pass,
+ const VkRenderPassBeginInfo *info)
+{
+ const struct VkRenderPassSampleLocationsBeginInfoEXT *sample_locs =
+ vk_find_struct_const(info->pNext,
+ RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_framebuffer *framebuffer = state->framebuffer;
- if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
- radv_handle_subpass_image_transition(cmd_buffer,
- subpass->depth_stencil_attachment);
- }
+ if (!sample_locs) {
+ state->subpass_sample_locs = NULL;
+ return VK_SUCCESS;
}
- cmd_buffer->state.subpass = subpass;
+ for (uint32_t i = 0; i < sample_locs->attachmentInitialSampleLocationsCount; i++) {
+ const VkAttachmentSampleLocationsEXT *att_sample_locs =
+ &sample_locs->pAttachmentInitialSampleLocations[i];
+ uint32_t att_idx = att_sample_locs->attachmentIndex;
+ struct radv_attachment_info *att = &framebuffer->attachments[att_idx];
+ struct radv_image *image = att->attachment->image;
- cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
+ assert(vk_format_is_depth_or_stencil(image->vk_format));
+
+ /* From the Vulkan spec 1.1.108:
+ *
+ * "If the image referenced by the framebuffer attachment at
+ * index attachmentIndex was not created with
+ * VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT
+ * then the values specified in sampleLocationsInfo are
+ * ignored."
+ */
+ if (!(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT))
+ continue;
+
+ const VkSampleLocationsInfoEXT *sample_locs_info =
+ &att_sample_locs->sampleLocationsInfo;
+
+ state->attachments[att_idx].sample_location.per_pixel =
+ sample_locs_info->sampleLocationsPerPixel;
+ state->attachments[att_idx].sample_location.grid_size =
+ sample_locs_info->sampleLocationGridSize;
+ state->attachments[att_idx].sample_location.count =
+ sample_locs_info->sampleLocationsCount;
+ typed_memcpy(&state->attachments[att_idx].sample_location.locations[0],
+ sample_locs_info->pSampleLocations,
+ sample_locs_info->sampleLocationsCount);
+ }
+
+ state->subpass_sample_locs = vk_alloc(&cmd_buffer->pool->alloc,
+ sample_locs->postSubpassSampleLocationsCount *
+ sizeof(state->subpass_sample_locs[0]),
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (state->subpass_sample_locs == NULL) {
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return cmd_buffer->record_result;
+ }
+
+ state->num_subpass_sample_locs = sample_locs->postSubpassSampleLocationsCount;
+
+ for (uint32_t i = 0; i < sample_locs->postSubpassSampleLocationsCount; i++) {
+ const VkSubpassSampleLocationsEXT *subpass_sample_locs_info =
+ &sample_locs->pPostSubpassSampleLocations[i];
+ const VkSampleLocationsInfoEXT *sample_locs_info =
+ &subpass_sample_locs_info->sampleLocationsInfo;
+
+ state->subpass_sample_locs[i].subpass_idx =
+ subpass_sample_locs_info->subpassIndex;
+ state->subpass_sample_locs[i].sample_location.per_pixel =
+ sample_locs_info->sampleLocationsPerPixel;
+ state->subpass_sample_locs[i].sample_location.grid_size =
+ sample_locs_info->sampleLocationGridSize;
+ state->subpass_sample_locs[i].sample_location.count =
+ sample_locs_info->sampleLocationsCount;
+ typed_memcpy(&state->subpass_sample_locs[i].sample_location.locations[0],
+ sample_locs_info->pSampleLocations,
+ sample_locs_info->sampleLocationsCount);
+ }
+
+ return VK_SUCCESS;
}
static VkResult
}
state->attachments[i].current_layout = att->initial_layout;
+ state->attachments[i].sample_location.count = 0;
}
return VK_SUCCESS;
if (result != VK_SUCCESS)
return result;
- radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
+ radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
}
if (unlikely(cmd_buffer->device->trace_bo)) {
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
}
+static uint32_t
+vk_to_index_type(VkIndexType type)
+{
+ switch (type) {
+ case VK_INDEX_TYPE_UINT8_EXT:
+ return V_028A7C_VGT_INDEX_8;
+ case VK_INDEX_TYPE_UINT16:
+ return V_028A7C_VGT_INDEX_16;
+ case VK_INDEX_TYPE_UINT32:
+ return V_028A7C_VGT_INDEX_32;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
+static uint32_t
+radv_get_vgt_index_size(uint32_t type)
+{
+ switch (type) {
+ case V_028A7C_VGT_INDEX_8:
+ return 1;
+ case V_028A7C_VGT_INDEX_16:
+ return 2;
+ case V_028A7C_VGT_INDEX_32:
+ return 4;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
void radv_CmdBindIndexBuffer(
VkCommandBuffer commandBuffer,
VkBuffer buffer,
cmd_buffer->state.index_buffer = index_buffer;
cmd_buffer->state.index_offset = offset;
- cmd_buffer->state.index_type = indexType; /* vk matches hw */
+ cmd_buffer->state.index_type = vk_to_index_type(indexType);
cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
cmd_buffer->state.index_va += index_buffer->offset + offset;
- int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
- cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
+ int index_size = radv_get_vgt_index_size(indexType);
+ cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(3) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ } else {
+ dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ }
+
cmd_buffer->push_constant_stages |=
set->layout->dynamic_shader_stages;
}
pipelineBindPoint))
return;
+ /* Check that there are no inline uniform block updates when calling vkCmdPushDescriptorSetKHR()
+	 * because it is invalid, according to the Vulkan spec.
+ */
+ for (int i = 0; i < descriptorWriteCount; i++) {
+ ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
+ assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
+ }
+
radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
radv_descriptor_set_to_handle(push_set),
descriptorWriteCount, pDescriptorWrites, 0, NULL);
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
- if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
+
+ /* Make sure to sync all pending active queries at the end of
+	 * the command buffer.
+ */
+ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
si_emit_cache_flush(cmd_buffer);
}
si_cp_dma_wait_for_idle(cmd_buffer);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
+ ASSERTED const uint32_t total_count = firstViewport + viewportCount;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
+ ASSERTED const uint32_t total_count = firstScissor + scissorCount;
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+ ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
}
+void radv_CmdSetSampleLocationsEXT(
+ VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
+
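+ /* Latch the user-provided sample locations into the dynamic state;
+ * they are emitted when the next draw is validated.
+ */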
+ state->dynamic.sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
+ state->dynamic.sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
+ state->dynamic.sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
+ typed_memcpy(&state->dynamic.sample_location.locations[0],
+ pSampleLocationsInfo->pSampleLocations,
+ pSampleLocationsInfo->sampleLocationsCount);
+
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
+}
+
void radv_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
if (secondary->sample_positions_needed)
primary->sample_positions_needed = true;
+ if (!secondary->state.framebuffer &&
+ (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
+ /* Emit the framebuffer state from the primary command buffer if the
+ * secondary has been recorded without a framebuffer; otherwise fast
+ * color/depth clears can't work.
+ */
+ radv_emit_framebuffer_state(primary);
+ }
+
primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
}
}
+static void
+radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t subpass_id)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
+
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 4096);
+
+ radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
+
+ radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+
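+ /* Transition the subpass attachments into the layouts they are used
+ * in before any clears happen.
+ */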
+ for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+ const uint32_t a = subpass->attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ radv_handle_subpass_image_transition(cmd_buffer,
+ subpass->attachments[i],
+ true);
+ }
+
+ radv_cmd_buffer_clear_subpass(cmd_buffer);
+
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+}
+
+static void
+radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ const struct radv_subpass *subpass = state->subpass;
+ uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
+
+ radv_cmd_buffer_resolve_subpass(cmd_buffer);
+
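+ /* Transition attachments to their final layout in the last subpass
+ * that uses them.
+ */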
+ for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
+ const uint32_t a = subpass->attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ if (state->pass->attachments[a].last_subpass_idx != subpass_id)
+ continue;
+
+ VkImageLayout layout = state->pass->attachments[a].final_layout;
+ struct radv_subpass_attachment att = { a, layout };
+ radv_handle_subpass_image_transition(cmd_buffer, att, false);
+ }
+}
+
void radv_CmdBeginRenderPass(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 2048);
- MAYBE_UNUSED VkResult result;
+ VkResult result;
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
if (result != VK_SUCCESS)
return;
- radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
+ if (result != VK_SUCCESS)
+ return;
- radv_cmd_buffer_clear_subpass(cmd_buffer);
+ radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
}
void radv_CmdBeginRenderPass2KHR(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
- radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
- 2048);
-
- radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
- radv_cmd_buffer_clear_subpass(cmd_buffer);
+ uint32_t prev_subpass = radv_get_subpass_id(cmd_buffer);
+ radv_cmd_buffer_end_subpass(cmd_buffer);
+ radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
}
void radv_CmdNextSubpass2KHR(
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
}
- if (pipeline->gs_copy_shader) {
+ if (radv_pipeline_has_gs_copy_shader(pipeline)) {
struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
if (loc->sgpr_idx != -1) {
uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
}
if (info->indexed) {
- int index_size = state->index_type ? 4 : 2;
+ int index_size = radv_get_vgt_index_size(state->index_type);
uint64_t index_va;
index_va = state->index_va;
if (cmd_buffer->state.dirty & used_states)
return true;
+ uint32_t primitive_reset_index =
+ radv_get_primitive_reset_index(cmd_buffer);
+
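+ /* The primitive restart index depends on the index type, so it must
+ * be re-emitted when the computed value changes.
+ */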
if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
- (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
+ primitive_reset_index != state->last_primitive_reset_index)
return true;
return false;
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
radv_emit_index_buffer(cmd_buffer);
} else {
- /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
+ /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed
* draw.
*/
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
cmd_buffer->state.last_index_type = -1;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
}
struct radeon_info *rad_info =
&cmd_buffer->device->physical_device->rad_info;
bool has_prefetch =
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
- MAYBE_UNUSED unsigned cdw_max =
+ ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
if (likely(!info->indirect)) {
- /* SI-CI treat instance_count==0 as instance_count==1. There is
+ /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
* no workaround for indirect draws, but we can at least skip
* direct draws.
*/
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndirectCountAMD(
- VkCommandBuffer commandBuffer,
- VkBuffer _buffer,
- VkDeviceSize offset,
- VkBuffer _countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
-
- info.count = maxDrawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.count_buffer = count_buffer;
- info.count_buffer_offset = countBufferOffset;
- info.stride = stride;
-
- radv_draw(cmd_buffer, &info);
-}
-
-void radv_CmdDrawIndexedIndirectCountAMD(
- VkCommandBuffer commandBuffer,
- VkBuffer _buffer,
- VkDeviceSize offset,
- VkBuffer _countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
-
- info.indexed = true;
- info.count = maxDrawCount;
- info.indirect = buffer;
- info.indirect_offset = offset;
- info.count_buffer = count_buffer;
- info.count_buffer_offset = countBufferOffset;
- info.stride = stride;
-
- radv_draw(cmd_buffer, &info);
-}
-
void radv_CmdDrawIndirectCountKHR(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+ ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
}
if (loc->sgpr_idx != -1) {
- assert(!loc->indirect);
assert(loc->num_sgprs == 3);
radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
{
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
bool has_prefetch =
- cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7;
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
- radv_cmd_buffer_resolve_subpass(cmd_buffer);
-
- for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
- VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
- radv_handle_subpass_image_transition(cmd_buffer,
- (struct radv_subpass_attachment){i, layout});
- }
+ radv_cmd_buffer_end_subpass(cmd_buffer);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
cmd_buffer->state.pass = NULL;
cmd_buffer->state.subpass = NULL;
cmd_buffer->state.attachments = NULL;
cmd_buffer->state.framebuffer = NULL;
+ cmd_buffer->state.subpass_sample_locs = NULL;
}
void radv_CmdEndRenderPass2KHR(
{
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
- unsigned layer_count = radv_get_layerCount(image, range);
- uint64_t size = image->surface.htile_slice_size * layer_count;
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
- uint64_t offset = image->offset + image->htile_offset +
- image->surface.htile_slice_size * range->baseArrayLayer;
struct radv_cmd_state *state = &cmd_buffer->state;
VkClearDepthStencilValue value = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
- size, clear_word);
+ state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range)
+ const VkImageSubresourceRange *range,
+ struct radv_sample_locations_state *sample_locs)
{
if (!radv_image_has_htile(image))
return;
- if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
- radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
- /* TODO: merge with the clear if applicable */
- radv_initialize_htile(cmd_buffer, image, range, 0);
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
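+ /* HTILE values for a fully-expanded (decompressed) surface:
+ * 0xfffff30f for depth+stencil, 0xfffc000f for depth-only.
+ */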
+ uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
+
+ if (radv_layout_is_htile_compressed(image, dst_layout,
+ dst_queue_mask)) {
+ clear_value = 0;
+ }
+
+ radv_initialize_htile(cmd_buffer, image, range, clear_value);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);
+ radv_decompress_depth_image_inplace(cmd_buffer, image,
+ &local_range, sample_locs);
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
}
static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, uint32_t value)
+ struct radv_image *image,
+ const VkImageSubresourceRange *range,
+ uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);
+ state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image)
+ struct radv_image *image,
+ const VkImageSubresourceRange *range)
{
struct radv_cmd_state *state = &cmd_buffer->state;
static const uint32_t fmask_clear_values[4] = {
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_clear_fmask(cmd_buffer, image, value);
+ state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, uint32_t value)
+ struct radv_image *image,
+ const VkImageSubresourceRange *range, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ unsigned size = 0;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);
+ state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
+ /* When DCC is enabled with mipmaps, some levels might not
+ * support fast clears and we have to initialize them as "fully
+ * expanded".
+ */
+ /* Compute the size of all fast clearable DCC levels. */
+ for (unsigned i = 0; i < image->planes[0].surface.num_dcc_levels; i++) {
+ struct legacy_surf_level *surf_level =
+ &image->planes[0].surface.u.legacy.level[i];
+ unsigned dcc_fast_clear_size =
+ surf_level->dcc_slice_fast_clear_size * image->info.array_size;
+
+ if (!dcc_fast_clear_size)
+ break;
+
+ size = surf_level->dcc_offset + dcc_fast_clear_size;
+ }
+
+ /* Initialize the mipmap levels without DCC. */
+ if (size != image->planes[0].surface.dcc_size) {
+ state->flush_bits |=
+ radv_fill_buffer(cmd_buffer, image->bo,
+ image->offset + image->dcc_offset + size,
+ image->planes[0].surface.dcc_size - size,
+ 0xffffffff);
+ }
+ }
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
VkImageLayout src_layout,
VkImageLayout dst_layout,
unsigned src_queue_mask,
- unsigned dst_queue_mask)
+ unsigned dst_queue_mask,
+ const VkImageSubresourceRange *range)
{
if (radv_image_has_cmask(image)) {
uint32_t value = 0xffffffffu; /* Fully expanded mode. */
value = 0xccccccccu;
}
- radv_initialise_cmask(cmd_buffer, image, value);
+ radv_initialise_cmask(cmd_buffer, image, range, value);
}
if (radv_image_has_fmask(image)) {
- radv_initialize_fmask(cmd_buffer, image);
+ radv_initialize_fmask(cmd_buffer, image, range);
}
- if (radv_image_has_dcc(image)) {
+ if (radv_dcc_enabled(image, range->baseMipLevel)) {
uint32_t value = 0xffffffffu; /* Fully expanded mode. */
bool need_decompress_pass = false;
need_decompress_pass = true;
}
- radv_initialize_dcc(cmd_buffer, image, value);
+ radv_initialize_dcc(cmd_buffer, image, range, value);
- radv_update_fce_metadata(cmd_buffer, image,
+ radv_update_fce_metadata(cmd_buffer, image, range,
need_decompress_pass);
}
- if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
+ if (radv_image_has_cmask(image) ||
+ radv_dcc_enabled(image, range->baseMipLevel)) {
uint32_t color_values[2] = {};
- radv_set_color_clear_metadata(cmd_buffer, image, color_values);
+ radv_set_color_clear_metadata(cmd_buffer, image, range,
+ color_values);
}
}
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
radv_init_color_image_metadata(cmd_buffer, image,
src_layout, dst_layout,
- src_queue_mask, dst_queue_mask);
+ src_queue_mask, dst_queue_mask,
+ range);
return;
}
- if (radv_image_has_dcc(image)) {
+ if (radv_dcc_enabled(image, range->baseMipLevel)) {
if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
- radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
+ radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu);
} else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
!radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
radv_decompress_dcc(cmd_buffer, image, range);
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
}
} else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
+ bool fce_eliminate = false, fmask_expand = false;
+
if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
!radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
- radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ fce_eliminate = true;
}
if (radv_image_has_fmask(image)) {
if (src_layout != VK_IMAGE_LAYOUT_GENERAL &&
dst_layout == VK_IMAGE_LAYOUT_GENERAL) {
- radv_expand_fmask_image_inplace(cmd_buffer, image, range);
+ /* An FMASK decompress is required before doing
+ * an MSAA decompress using FMASK.
+ */
+ fmask_expand = true;
}
}
+
+ if (fce_eliminate || fmask_expand)
+ radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+
+ if (fmask_expand)
+ radv_expand_fmask_image_inplace(cmd_buffer, image, range);
}
}
VkImageLayout dst_layout,
uint32_t src_family,
uint32_t dst_family,
- const VkImageSubresourceRange *range)
+ const VkImageSubresourceRange *range,
+ struct radv_sample_locations_state *sample_locs)
{
if (image->exclusive && src_family != dst_family) {
/* This is an acquire or a release operation and there will be
assert(src_family == cmd_buffer->queue_family_index ||
dst_family == cmd_buffer->queue_family_index);
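+ /* Nothing to do when acquiring from an external/foreign queue; the
+ * driver cannot know what was done to the image on the other side.
+ */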
+ if (src_family == VK_QUEUE_FAMILY_EXTERNAL ||
+ src_family == VK_QUEUE_FAMILY_FOREIGN_EXT)
+ return;
+
if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
return;
return;
}
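+ /* Skip redundant transitions when the layout does not change. */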
+ if (src_layout == dst_layout)
+ return;
+
unsigned src_queue_mask =
radv_image_queue_family_mask(image, src_family,
cmd_buffer->queue_family_index);
radv_handle_depth_image_transition(cmd_buffer, image,
src_layout, dst_layout,
src_queue_mask, dst_queue_mask,
- range);
+ range, sample_locs);
} else {
radv_handle_color_image_transition(cmd_buffer, image,
src_layout, dst_layout,
uint32_t eventCount;
const VkEvent *pEvents;
VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
};
static void
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
assert(cmd_buffer->cs->cdw <= cdw_max);
image);
}
- radv_stage_flush(cmd_buffer, info->srcStageMask);
+ /* The Vulkan spec 1.1.98 says:
+ *
+ * "An execution dependency with only
+ * VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT in the destination stage mask
+ * will only prevent that stage from executing in subsequently
+ * submitted commands. As this stage does not perform any actual
+ * execution, this is not observable - in effect, it does not delay
+ * processing of subsequent commands. Similarly an execution dependency
+ * with only VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT in the source stage mask
+ * will effectively not wait for any prior commands to complete."
+ */
+ if (info->dstStageMask != VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
+
+ const struct VkSampleLocationsInfoEXT *sample_locs_info =
+ vk_find_struct_const(pImageMemoryBarriers[i].pNext,
+ SAMPLE_LOCATIONS_INFO_EXT);
+ struct radv_sample_locations_state sample_locations = {};
+
+ if (sample_locs_info) {
+ assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
+ sample_locations.per_pixel = sample_locs_info->sampleLocationsPerPixel;
+ sample_locations.grid_size = sample_locs_info->sampleLocationGridSize;
+ sample_locations.count = sample_locs_info->sampleLocationsCount;
+ typed_memcpy(&sample_locations.locations[0],
+ sample_locs_info->pSampleLocations,
+ sample_locs_info->sampleLocationsCount);
+ }
+
radv_handle_image_transition(cmd_buffer, image,
pImageMemoryBarriers[i].oldLayout,
pImageMemoryBarriers[i].newLayout,
pImageMemoryBarriers[i].srcQueueFamilyIndex,
pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange);
+ &pImageMemoryBarriers[i].subresourceRange,
+ sample_locs_info ? &sample_locations : NULL);
}
/* Make sure CP DMA is idle because the driver might have performed a
info.eventCount = 0;
info.pEvents = NULL;
info.srcStageMask = srcStageMask;
+ info.dstStageMask = destStageMask;
radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers,
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
/* Flags that only require a top-of-pipe event. */
VkPipelineStageFlags top_of_pipe_flags =
cmd_buffer->device->physical_device->rad_info.chip_class,
radv_cmd_buffer_uses_mec(cmd_buffer),
V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DST_SEL_MEM,
EOP_DATA_SEL_VALUE_32BIT, va, value,
cmd_buffer->gfx9_eop_bug_va);
}
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
bool draw_visible = true;
- uint64_t va;
+ uint64_t pred_value = 0;
+ uint64_t va, new_va;
+ unsigned pred_offset;
va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
si_emit_cache_flush(cmd_buffer);
+ /* From the Vulkan spec 1.1.107:
+ *
+ * "If the 32-bit value at offset in buffer memory is zero, then the
+ * rendering commands are discarded, otherwise they are executed as
+ * normal. If the value of the predicate in buffer memory changes while
+ * conditional rendering is active, the rendering commands may be
+ * discarded in an implementation-dependent way. Some implementations
+ * may latch the value of the predicate upon beginning conditional
+ * rendering while others may read it before every rendering command."
+ *
+ * However, the AMD hardware treats the predicate as a 64-bit value,
+ * which means we need a workaround in the driver. Luckily, we are not
+ * required to support the case where the value changes while
+ * predication is active.
+ *
+ * The workaround is as follows:
+ * 1) allocate a 64-bit value in the upload BO and initialize it to 0
+ * 2) copy the 32-bit predicate value to the upload BO
+ * 3) use the newly-allocated VA for predication
+ *
+ * Based on the conditionalrender demo, it's faster to do the COPY_DATA
+ * in the ME (+ PFP sync) than in the PFP.
+ */
+ radv_cmd_buffer_upload_data(cmd_buffer, 8, 16, &pred_value, &pred_offset);
+
+ new_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + pred_offset;
+
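+ /* Copy the 32-bit user predicate into the low dword of the
+ * zero-initialized 64-bit slot.
+ */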
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, new_va);
+ radeon_emit(cs, new_va >> 32);
+
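+ /* COPY_DATA is executed by the ME; stall the PFP so that the
+ * following SET_PREDICATION reads the updated value.
+ */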
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+
/* Enable predication for this command buffer. */
- si_emit_set_predication_state(cmd_buffer, draw_visible, va);
+ si_emit_set_predication_state(cmd_buffer, draw_visible, new_va);
cmd_buffer->state.predicating = true;
/* Store conditional rendering user info. */
cmd_buffer->state.predication_type = draw_visible;
- cmd_buffer->state.predication_va = va;
+ cmd_buffer->state.predication_va = new_va;
}
void radv_CmdEndConditionalRenderingEXT(
enabled_mask |= 1 << idx;
}
- cmd_buffer->state.streamout.enabled_mask = enabled_mask;
+ cmd_buffer->state.streamout.enabled_mask |= enabled_mask;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
}
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
} else {
radeon_emit(cs, 4); /* poll interval */
}
-void radv_CmdBeginTransformFeedbackEXT(
- VkCommandBuffer commandBuffer,
- uint32_t firstCounterBuffer,
- uint32_t counterBufferCount,
- const VkBuffer* pCounterBuffers,
- const VkDeviceSize* pCounterBufferOffsets)
+static void
+radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
struct radv_streamout_state *so = &cmd_buffer->state.streamout;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
counter_buffer_idx = -1;
- /* SI binds streamout buffers as shader resources.
+ /* AMD GCN binds streamout buffers as shader resources.
* VGT only counts primitives and tells the shader through
* SGPRs what to do.
*/
radv_set_streamout_enable(cmd_buffer, true);
}
-void radv_CmdEndTransformFeedbackEXT(
+void radv_CmdBeginTransformFeedbackEXT(
VkCommandBuffer commandBuffer,
uint32_t firstCounterBuffer,
uint32_t counterBufferCount,
const VkDeviceSize* pCounterBufferOffsets)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_emit_streamout_begin(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+}
+
+static void
+radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
struct radv_streamout_state *so = &cmd_buffer->state.streamout;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t i;
radv_set_streamout_enable(cmd_buffer, false);
}
+void radv_CmdEndTransformFeedbackEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer* pCounterBuffers,
+ const VkDeviceSize* pCounterBufferOffsets)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_emit_streamout_end(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+}
+
void radv_CmdDrawIndirectByteCountEXT(
VkCommandBuffer commandBuffer,
uint32_t instanceCount,
radv_draw(cmd_buffer, &info);
}
+
+/* VK_AMD_buffer_marker */
+void radv_CmdWriteBufferMarkerAMD(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ uint32_t marker)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint64_t va = radv_buffer_get_va(buffer->bo) + dstOffset;
+
+ si_emit_cache_flush(cmd_buffer);
+
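+ /* A top-of-pipe-only marker can be written immediately with
+ * COPY_DATA; anything later must wait for bottom-of-pipe via an
+ * EOP event.
+ */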
+ if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
+ COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, marker);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ } else {
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DST_SEL_MEM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ va, marker,
+ cmd_buffer->gfx9_eop_bug_va);
+ }
+}