sba.GeneralStateMOCS = GENX(MOCS);
sba.GeneralStateBaseAddressModifyEnable = true;
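+ /* Stateless data-port accesses have no surface state to carry a MOCS
+  * value, so their caching policy comes from this STATE_BASE_ADDRESS
+  * field instead.
+  */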
+ sba.StatelessDataPortAccessMOCS = GENX(MOCS);
+
sba.SurfaceStateBaseAddress =
anv_cmd_buffer_surface_base_address(cmd_buffer);
sba.SurfaceStateMOCS = GENX(MOCS);
union isl_color_value *fast_clear_color)
{
struct anv_attachment_state *att_state = &cmd_state->attachments[att];
- struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];
+ struct anv_image_view *iview = cmd_state->attachments[att].image_view;
assert(iview->n_planes == 1);
struct anv_render_pass_attachment *pass_att =
&cmd_state->pass->attachments[att];
struct anv_attachment_state *att_state = &cmd_state->attachments[att];
- struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];
+ struct anv_image_view *iview = cmd_state->attachments[att].image_view;
/* These will be initialized after the first subpass transition. */
att_state->aux_usage = ISL_AUX_USAGE_NONE;
set_image_compressed_bit(cmd_buffer, image, aspect, 0, 0, 1, true);
}
-#if GEN_IS_HASWELL || GEN_GEN >= 8
-static inline uint32_t
-mi_alu(uint32_t opcode, uint32_t operand1, uint32_t operand2)
-{
- struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
- .ALUOpcode = opcode,
- .Operand1 = operand1,
- .Operand2 = operand2,
- };
-
- uint32_t dw;
- GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
-
- return dw;
-}
-#endif
-
/* This is only really practical on Haswell and above because it requires
* MI math in order to get it correct.
*/
{
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
struct anv_cmd_state *state = &cmd_buffer->state;
+ struct anv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
vk_free(&cmd_buffer->pool->alloc, state->attachments);
next_state.offset += ss_stride;
next_state.map += ss_stride;
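+ /* With VK_KHR_imageless_framebuffer the attachments are not baked into
+  * the framebuffer; the application hands us the image views at
+  * vkCmdBeginRenderPass time through a VkRenderPassAttachmentBeginInfoKHR
+  * chained into VkRenderPassBeginInfo.
+  */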
+ const VkRenderPassAttachmentBeginInfoKHR *begin_attachment =
+ vk_find_struct_const(begin, RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
+
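+ /* When the attachments come from the framebuffer itself, its attachment
+  * count has to match the render pass.
+  */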
+ if (begin && !begin_attachment)
+ assert(pass->attachment_count == framebuffer->attachment_count);
+
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
if (vk_format_is_color(pass->attachments[i].format)) {
state->attachments[i].color.state = next_state;
next_state.offset += ss_stride;
next_state.map += ss_stride;
}
+
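+ /* Prefer the image views supplied at vkCmdBeginRenderPass; otherwise fall
+  * back to the ones stored in the framebuffer.
+  */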
+ if (begin_attachment && begin_attachment->attachmentCount != 0) {
+ assert(begin_attachment->attachmentCount == pass->attachment_count);
+ ANV_FROM_HANDLE(anv_image_view, iview, begin_attachment->pAttachments[i]);
+ cmd_buffer->state.attachments[i].image_view = iview;
+ } else if (framebuffer && i < framebuffer->attachment_count) {
+ cmd_buffer->state.attachments[i].image_view = framebuffer->attachments[i];
+ }
}
assert(next_state.offset == state->render_pass_states.offset +
state->render_pass_states.alloc_size);
if (begin) {
- ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
- assert(pass->attachment_count == framebuffer->attachment_count);
-
isl_null_fill_state(isl_dev, state->null_surface_state.map,
isl_extent3d(framebuffer->width,
framebuffer->height,
if (clear_aspects)
state->attachments[i].clear_value = begin->pClearValues[i];
- struct anv_image_view *iview = framebuffer->attachments[i];
+ struct anv_image_view *iview = cmd_buffer->state.attachments[i].image_view;
anv_assert(iview->vk_format == att->format);
const uint32_t num_layers = iview->planes[0].isl.array_len;
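For context, here is a minimal application-side sketch of the VK_KHR_imageless_framebuffer flow that the driver code above consumes. The device, command buffer, render pass, image view, format and extent are assumed to exist already, error handling is mostly omitted, and the helper name is made up for illustration.

#include <vulkan/vulkan.h>

static void
begin_imageless_render_pass(VkDevice device, VkCommandBuffer cmd,
                            VkRenderPass render_pass, VkImageView color_view,
                            VkFormat color_format, VkExtent2D extent)
{
   /* Describe the attachment up front instead of binding an image view. */
   const VkFramebufferAttachmentImageInfoKHR image_info = {
      .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR,
      .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
      .width = extent.width,
      .height = extent.height,
      .layerCount = 1,
      .viewFormatCount = 1,
      .pViewFormats = &color_format,
   };
   const VkFramebufferAttachmentsCreateInfoKHR attachments_info = {
      .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR,
      .attachmentImageInfoCount = 1,
      .pAttachmentImageInfos = &image_info,
   };
   const VkFramebufferCreateInfo fb_info = {
      .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
      .pNext = &attachments_info,
      .flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
      .renderPass = render_pass,
      .attachmentCount = 1,   /* count only, pAttachments is ignored */
      .width = extent.width,
      .height = extent.height,
      .layers = 1,
   };
   VkFramebuffer fb;
   if (vkCreateFramebuffer(device, &fb_info, NULL, &fb) != VK_SUCCESS)
      return;

   /* The real image view shows up only now, chained into the begin info;
    * this is what the driver reads back as begin_attachment above. */
   const VkRenderPassAttachmentBeginInfoKHR attachment_begin = {
      .sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR,
      .attachmentCount = 1,
      .pAttachments = &color_view,
   };
   const VkRenderPassBeginInfo begin_info = {
      .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
      .pNext = &attachment_begin,
      .renderPass = render_pass,
      .framebuffer = fb,
      .renderArea = { .offset = { 0, 0 }, .extent = extent },
   };
   vkCmdBeginRenderPass(cmd, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
}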
assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);
/* Minimum number of ways that can be allocated to the URB. */
- MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
+ const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
assert(cfg->n[GEN_L3P_URB] >= n0_urb);
uint32_t l3sqcr1, l3cr2, l3cr3;
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
- MAYBE_UNUSED VkResult result;
+ VkResult result;
assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif
+#if GEN_GEN == 9
+ if (pipeline == _3D) {
+ /* There is a mid-object preemption workaround which requires you to
+ * re-emit MEDIA_VFE_STATE after switching from GPGPU to 3D. However,
+ * even without preemption, we see geometry flickering when GPGPU and
+ * 3D work are issued back-to-back, and re-emitting MEDIA_VFE_STATE here
+ * seems to fix it. We don't really know why.
+ */
+ const uint32_t subslices =
+ MAX2(cmd_buffer->device->instance->physicalDevice.subslice_total, 1);
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
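+ /* The field is programmed as the total number of CS threads across
+  * all subslices, minus one.
+  */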
+ vfe.MaximumNumberofThreads =
+ devinfo->max_cs_threads * subslices - 1;
+ vfe.NumberofURBEntries = 2;
+ vfe.URBEntryAllocationSize = 2;
+ }
+ }
+#endif
+
/* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
* PIPELINE_SELECT [DevBWR+]":
*
assert(a < cmd_state->pass->attachment_count);
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
- struct anv_image_view *iview = fb->attachments[a];
+ struct anv_image_view *iview = cmd_state->attachments[a].image_view;
const struct anv_image *image = iview->image;
/* A resolve is necessary before use as an input attachment if the clear
cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
}
- struct anv_image_view *src_iview = fb->attachments[src_att];
- struct anv_image_view *dst_iview = fb->attachments[dst_att];
+ struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
+ struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
const VkRect2D render_area = cmd_buffer->state.render_area;
cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
}
- struct anv_image_view *src_iview = fb->attachments[src_att];
- struct anv_image_view *dst_iview = fb->attachments[dst_att];
+ struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
+ struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
const VkRect2D render_area = cmd_buffer->state.render_area;
assert(a != VK_ATTACHMENT_UNUSED);
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
- struct anv_image_view *iview = fb->attachments[a];
+ struct anv_image_view *iview = cmd_state->attachments[a].image_view;
const struct anv_image *image = iview->image;
if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
assert(a < cmd_state->pass->attachment_count);
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
- struct anv_image_view *iview = fb->attachments[a];
+ struct anv_image_view *iview = cmd_state->attachments[a].image_view;
const struct anv_image *image = iview->image;
if ((image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) &&
* SRGB view & a UNORM image).
*/
if (fast_clear_type != ANV_FAST_CLEAR_NONE) {
- anv_perf_warn(cmd_buffer->device->instance, fb,
+ anv_perf_warn(cmd_buffer->device->instance, iview,
"Doing a partial resolve to get rid of clear color at the "
"end of a renderpass due to an image/view format mismatch");
cmd_buffer->state.hiz_enabled = false;
#ifndef NDEBUG
- anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
+ anv_dump_add_attachments(cmd_buffer);
#endif
/* Remove references to render pass specific state. This enables us to