buf->dirty_mask = 0;
}
+
+/* Emit the shared-memory descriptor for a compute dispatch: allocate a
+ * batch-local shared-memory BO sized for the whole grid, fill out a
+ * mali_shared_memory descriptor, and point the payload's postfix at a
+ * transient upload of that descriptor. Factored out of the compute
+ * launch path (see the removed inline copy in pan_compute.c below). */
+void
+panfrost_emit_shared_memory(struct panfrost_batch *batch,
+ const struct pipe_grid_info *info,
+ struct midgard_payload_vertex_tiler *vtp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
+ struct panfrost_shader_state *ss = &all->variants[all->active_variant];
+ /* Per-workgroup footprint: at least 128 bytes, rounded up to a power
+ * of two so the log2-based shared_shift encoding below is exact. */
+ unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
+ 128));
+ /* Total allocation scales with the number of workgroups in the grid.
+ * NOTE(review): the extra factor of 4 is not explained here — presumably
+ * hardware overallocation headroom; confirm against Midgard docs. */
+ unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
+ info->grid[2] * 4;
+ struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
+ shared_size,
+ 1);
+
+ struct mali_shared_memory shared = {
+ .shared_memory = bo->gpu,
+ /* Sum of ceil(log2(dim)) over the three grid dimensions, i.e. the
+ * log2 of the (power-of-two-rounded) total workgroup count. */
+ .shared_workgroup_count =
+ util_logbase2_ceil(info->grid[0]) +
+ util_logbase2_ceil(info->grid[1]) +
+ util_logbase2_ceil(info->grid[2]),
+ /* Magic: meaning of this field is unknown (hence the name). */
+ .shared_unk1 = 0x2,
+ /* single_size is a power of two >= 128, so log2 >= 7 and the -1
+ * bias cannot underflow. Why the hardware wants log2 - 1 is not
+ * evident here — TODO confirm encoding. */
+ .shared_shift = util_logbase2(single_size) - 1
+ };
+
+ /* Descriptor lives in transient (per-batch) memory; the payload only
+ * records its GPU address. */
+ vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
+ sizeof(shared));
+}
#define __PAN_CMDSTREAM_H__
#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
#include "panfrost-job.h"
enum pipe_shader_type stage,
struct midgard_payload_vertex_tiler *vtp);
+void
+panfrost_emit_shared_memory(struct panfrost_batch *batch,
+ const struct pipe_grid_info *info,
+ struct midgard_payload_vertex_tiler *vtp);
+
#endif /* __PAN_CMDSTREAM_H__ */
*/
#include "pan_context.h"
+#include "pan_cmdstream.h"
#include "pan_bo.h"
#include "util/u_memory.h"
#include "nir_serialize.h"
/* TODO: Stub */
struct midgard_payload_vertex_tiler *payload = &ctx->payloads[PIPE_SHADER_COMPUTE];
- struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
- struct panfrost_shader_state *ss = &all->variants[all->active_variant];
/* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
* reuse the graphics path for this by lowering to Gallium */
pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);
panfrost_emit_for_draw(ctx, false);
-
- unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size, 128));
- unsigned shared_size = single_size * info->grid[0] * info->grid[1] * info->grid[2] * 4;
-
- struct mali_shared_memory shared = {
- .shared_memory = panfrost_batch_get_shared_memory(batch, shared_size, 1)->gpu,
- .shared_workgroup_count =
- util_logbase2_ceil(info->grid[0]) +
- util_logbase2_ceil(info->grid[1]) +
- util_logbase2_ceil(info->grid[2]),
- .shared_unk1 = 0x2,
- .shared_shift = util_logbase2(single_size) - 1
- };
-
- payload->postfix.shared_memory =
- panfrost_upload_transient(batch, &shared, sizeof(shared));
+ panfrost_emit_shared_memory(batch, info, payload);
/* Invoke according to the grid info */