void genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer);
void genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer);
-void genX(setup_pipeline_l3_config)(struct anv_pipeline *pipeline);
-
void genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
const struct anv_pipeline *pipeline);
#include <fcntl.h>
#include "util/mesa-sha1.h"
+#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
return VK_SUCCESS;
}
-
-void
-anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline)
-{
- const struct gen_device_info *devinfo = &pipeline->device->info;
- switch (devinfo->gen) {
- case 7:
- if (devinfo->is_haswell)
- gen75_setup_pipeline_l3_config(pipeline);
- else
- gen7_setup_pipeline_l3_config(pipeline);
- break;
- case 8:
- gen8_setup_pipeline_l3_config(pipeline);
- break;
- case 9:
- gen9_setup_pipeline_l3_config(pipeline);
- break;
- default:
- unreachable("unsupported gen\n");
- }
-}
-
void
anv_compute_urb_partition(struct anv_pipeline *pipeline)
{
}
}
+/**
+ * Calculate the desired L3 partitioning based on the current state of the
+ * pipeline.  The caller passes needs_slm to indicate whether any of its
+ * shader stages use shared local memory.  For now this simply returns the
+ * conservative defaults calculated by gen_get_default_l3_weights(), but we
+ * could probably do better by gathering more statistics from the pipeline
+ * state (e.g. guess of expected URB usage and bound surfaces), or by using
+ * feedback from performance counters.
+ */
+void
+anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
+{
+ const struct gen_device_info *devinfo = &pipeline->device->info;
+
+ const struct gen_l3_weights w =
+ gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
+
+ pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
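+ /* Remember how much URB space the chosen L3 configuration leaves us;
+  * anv_compute_urb_partition(), called right after this, divides it up
+  * among the shader stages.
+  */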
+ pipeline->urb.total_size =
+ gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
+}
+
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
struct anv_device *device,
assert(extra->disable_vs);
}
- anv_setup_pipeline_l3_config(pipeline);
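+ /* Shared local memory is only available to compute shaders, so graphics
+  * pipelines never need an SLM-capable L3 configuration.
+  */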
+ anv_pipeline_setup_l3_config(pipeline, false);
anv_compute_urb_partition(pipeline);
const VkPipelineVertexInputStateCreateInfo *vi_info =
anv_compute_urb_partition(struct anv_pipeline *pipeline);
void
-anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline);
+anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
* Subsurface of an anv_image.
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
-/**
- * Calculate the desired L3 partitioning based on the current state of the
- * pipeline. For now this simply returns the conservative defaults calculated
- * by get_default_l3_weights(), but we could probably do better by gathering
- * more statistics from the pipeline state (e.g. guess of expected URB usage
- * and bound surfaces), or by using feed-back from performance counters.
- */
-static struct gen_l3_weights
-get_pipeline_state_l3_weights(const struct anv_pipeline *pipeline)
-{
- bool needs_dc = false, needs_slm = false;
-
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- if (!anv_pipeline_has_stage(pipeline, i))
- continue;
-
- const struct brw_stage_prog_data *prog_data =
- anv_shader_bin_get_prog_data(pipeline->shaders[i]);
-
- needs_dc |= pipeline->needs_data_cache;
- needs_slm |= prog_data->total_shared;
- }
-
- return gen_get_default_l3_weights(&pipeline->device->info,
- needs_dc, needs_slm);
-}
-
#define emit_lri(batch, reg, imm) \
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) { \
lri.RegisterOffset = __anv_reg_num(reg); \
}
-void
-genX(setup_pipeline_l3_config)(struct anv_pipeline *pipeline)
-{
- const struct gen_l3_weights w = get_pipeline_state_l3_weights(pipeline);
- const struct gen_device_info *devinfo = &pipeline->device->info;
-
- pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
- pipeline->urb.total_size =
- gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
-}
-
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
const struct anv_pipeline *pipeline)
pipeline->use_repclear = false;
- anv_setup_pipeline_l3_config(pipeline);
-
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
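+ /* Request an SLM-capable L3 configuration only if the compute shader
+  * actually declares shared local memory.
+  */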
+ anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
+
uint32_t group_size = cs_prog_data->local_size[0] *
cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);