*/
#include "iris_batch.h"
+#include "iris_binder.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
batch->validation_list =
malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
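+   /* No binder BO yet; one is created when the batch is first reset. */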
+ batch->binder.bo = NULL;
+
batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
create_batch(batch);
assert(batch->bo->index == 0);
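+   /* Give each new batch a fresh, empty binder, releasing the old one. */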
+ iris_destroy_binder(&batch->binder);
+ iris_init_binder(&batch->binder, batch->bo->bufmgr);
+
if (batch->state_sizes)
_mesa_hash_table_clear(batch->state_sizes, NULL);
#include <stdbool.h>
#include "i915_drm.h"
#include "common/gen_decoder.h"
+#include "iris_binder.h"
/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)
/** Size of the primary batch if we've moved on to a secondary. */
unsigned primary_batch_size;
-
/** Last BO submitted to the hardware. Used for glFinish(). */
struct iris_bo *last_bo;
/** The amount of aperture space (in bytes) used by all exec_bos */
int aperture_space;
+ /** Binder (containing binding tables) */
+ struct iris_binder binder;
+
struct {
/**
* Set of struct brw_bo * that have been rendered to within this
#include "util/u_math.h"
#include "iris_binder.h"
#include "iris_bufmgr.h"
+#include "iris_context.h"
/* 64kb */
#define BINDER_SIZE (64 * 1024)
-void *
-iris_binder_reserve(struct iris_binder *binder, unsigned size,
- uint32_t *out_offset)
+/**
+ * Reserve a block of space in the binder, returning its offset within
+ * the binder BO.  May flush the batch if the binder is out of space.
+ */
+uint32_t
+iris_binder_reserve(struct iris_batch *batch, unsigned size)
{
- /* XXX: if we ever make this allocate a new BO, then make binder_reserve
- * return the BO, so at least verify use_pinned_bo gets the right one
- */
- /* XXX: Implement a real ringbuffer, for now just croak if run out */
- assert(size > 0);
- assert(binder->insert_point + size <= BINDER_SIZE);
+ struct iris_binder *binder = &batch->binder;
+ assert(size > 0);
assert((binder->insert_point % 64) == 0);
- *out_offset = binder->insert_point;
+
+   /* If the request won't fit in the remaining binder space, flush the
+    * batch, which gives us a brand new, empty binder to allocate from.
+    */
+ if (binder->insert_point + size > BINDER_SIZE)
+ iris_batch_flush(batch);
+
+ uint32_t offset = binder->insert_point;
+
+ /* It had better fit now. */
+ assert(offset + size <= BINDER_SIZE);
binder->insert_point = align(binder->insert_point + size, 64);
- return binder->map + *out_offset;
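+   /* Pin the binder BO, adding it to this batch's validation list. */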
+ iris_use_pinned_bo(batch, binder->bo, false);
+
+ return offset;
+}
+
+/**
+ * Reserve and record binder space for the 3D pipeline's shader stages.
+ *
+ * Reserving one block for all stages at once means any batch flush can
+ * only happen up front, before any binding tables are written.
+ */
+void
+iris_binder_reserve_3d(struct iris_batch *batch,
+ struct iris_compiled_shader **shaders)
+{
+ struct iris_binder *binder = &batch->binder;
+ unsigned total_size = 0;
+ unsigned sizes[MESA_SHADER_STAGES] = {};
+
+ for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
+ if (!shaders[stage])
+ continue;
+
+ const struct brw_stage_prog_data *prog_data =
+ (const void *) shaders[stage]->prog_data;
+
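+      /* Pad each table to the binder's 64-byte allocation granularity. */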
+ sizes[stage] = align(prog_data->binding_table.size_bytes, 64);
+ total_size += sizes[stage];
+ }
+
+ uint32_t offset = iris_binder_reserve(batch, total_size);
+
+   /* Assign each stage its offset within the reserved block.  A zero
+    * offset means the stage has no binding table.
+    */
+ for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
+ binder->bt_offset[stage] = sizes[stage] > 0 ? offset : 0;
+ offset += sizes[stage];
+ }
}
void
#include <stdint.h>
#include <stdbool.h>
+#include "compiler/shader_enums.h"
struct iris_bo;
+struct iris_batch;
struct iris_bufmgr;
+struct iris_compiled_shader;
struct iris_binder
{
struct iris_bo *bo;
void *map;
- /* Insert new entries at this offset (in bytes) */
- unsigned insert_point;
+ /** Insert new entries at this offset (in bytes) */
+ uint32_t insert_point;
+
+ /**
+ * Last assigned offset for each shader stage's binding table.
+ * Zero is considered invalid and means there's no binding table.
+ */
+ uint32_t bt_offset[MESA_SHADER_STAGES];
};
void iris_init_binder(struct iris_binder *binder, struct iris_bufmgr *bufmgr);
void iris_destroy_binder(struct iris_binder *binder);
-void *iris_binder_reserve(struct iris_binder *binder, unsigned size,
- uint32_t *out_offset);
+uint32_t iris_binder_reserve(struct iris_batch *batch, unsigned size);
+void iris_binder_reserve_3d(struct iris_batch *batch,
+ struct iris_compiled_shader **shaders);
#endif
struct iris_context *ice = blorp_batch->blorp->driver_ctx;
struct iris_batch *batch = blorp_batch->driver_batch;
- uint32_t *bt_map = iris_binder_reserve(&ice->state.binder,
- num_entries * sizeof(uint32_t),
- bt_offset);
- iris_use_pinned_bo(batch, ice->state.binder.bo, false);
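+   /* iris_binder_reserve() now pins the binder BO for us. */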
+ *bt_offset = iris_binder_reserve(batch, num_entries * sizeof(uint32_t));
+ uint32_t *bt_map = batch->binder.map + *bt_offset;
for (unsigned i = 0; i < num_entries; i++) {
surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
u_upload_destroy(ctx->stream_uploader);
iris_destroy_program_cache(ice);
- iris_destroy_binder(&ice->state.binder);
u_upload_destroy(ice->state.surface_uploader);
u_upload_destroy(ice->state.dynamic_uploader);
iris_init_program_cache(ice);
- iris_init_binder(&ice->state.binder, screen->bufmgr);
-
ice->state.surface_uploader =
u_upload_create(&ice->ctx, 16384, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
IRIS_RESOURCE_FLAG_SURFACE_MEMZONE);
#include "intel/common/gen_debug.h"
#include "intel/compiler/brw_compiler.h"
#include "iris_batch.h"
-#include "iris_binder.h"
#include "iris_screen.h"
struct iris_bo;
unsigned num_samplers[MESA_SHADER_STAGES];
unsigned num_textures[MESA_SHADER_STAGES];
- struct iris_binder binder;
struct u_upload_mgr *surface_uploader;
// XXX: may want a separate uploader for "hey I made a CSO!" vs
// "I'm streaming this out at draw time and never want it again!"
iris_cache_sets_clear(batch);
// XXX: ^^^
-
iris_update_compiled_shaders(ice);
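+   /* Reserve binder space for all stages up front, so a full binder
+    * flushes the batch now, before we start emitting the draw's state.
+    */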
+ iris_binder_reserve_3d(batch, ice->shaders.prog);
ice->vtbl.upload_render_state(ice, batch, info);
// XXX: don't flush always
return isv->surface_state_offset;
}
+static void
+iris_populate_binding_table(struct iris_context *ice,
+ struct iris_batch *batch,
+ gl_shader_stage stage)
+{
+ const struct iris_binder *binder = &batch->binder;
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ if (!shader)
+ return;
+
+ // Surfaces:
+ // - pull constants
+ // - ubos/ssbos/abos
+ // - images
+ // - textures
+ // - render targets - write and read
+
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+ uint32_t *bt_map = binder->map + binder->bt_offset[stage];
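+   /* Next free slot in this stage's binding table. */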
+ int s = 0;
+
+ if (stage == MESA_SHADER_FRAGMENT) {
+ struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
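+      /* Render targets come first in the fragment shader's binding table. */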
+ for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+ bt_map[s++] = use_surface(batch, cso_fb->cbufs[i], true);
+ }
+ }
+
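+   /* The compiler marks a binding table section with no entries by
+    * setting its *_start offset to 0xd0d0d0d0.
+    */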
+ assert(prog_data->binding_table.texture_start ==
+ (ice->state.num_textures[stage] ? s : 0xd0d0d0d0));
+
+ for (int i = 0; i < ice->state.num_textures[stage]; i++) {
+ struct iris_sampler_view *view = ice->state.textures[stage][i];
+ bt_map[s++] = use_sampler_view(batch, view);
+ }
+
+#if 0
+ // XXX: not implemented yet
+ assert(prog_data->binding_table.pull_constants_start == 0xd0d0d0d0);
+ assert(prog_data->binding_table.ubo_start == 0xd0d0d0d0);
+ assert(prog_data->binding_table.ssbo_start == 0xd0d0d0d0);
+ assert(prog_data->binding_table.image_start == 0xd0d0d0d0);
+ assert(prog_data->binding_table.shader_time_start == 0xd0d0d0d0);
+ //assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
+ //assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
+#endif
+}
+
static void
iris_upload_render_state(struct iris_context *ice,
struct iris_batch *batch,
}
}
- // Surfaces:
- // - pull constants
- // - ubos/ssbos/abos
- // - images
- // - textures
- // - render targets - write and read
- // XXX: 3DSTATE_BINDING_TABLE_POINTERS_XS
-
- for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- struct iris_compiled_shader *shader = ice->shaders.prog[stage];
- if (!shader) // XXX: dirty bits...also, emit a disable maybe?
- continue;
-
- struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
- uint32_t bt_offset = 0;
- uint32_t *bt_map = NULL;
- int s = 0;
-
- if (prog_data->binding_table.size_bytes != 0) {
- iris_use_pinned_bo(batch, ice->state.binder.bo, false);
- bt_map = iris_binder_reserve(&ice->state.binder,
- prog_data->binding_table.size_bytes,
- &bt_offset);
- }
+ if (1) { // XXX: DIRTY BINDINGS
+ const struct iris_binder *binder = &batch->binder;
- iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
- ptr._3DCommandSubOpcode = 38 + stage;
- ptr.PointertoVSBindingTable = bt_offset;
- }
-
- if (stage == MESA_SHADER_FRAGMENT) {
- struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
- for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
- bt_map[s++] = use_surface(batch, cso_fb->cbufs[i], true);
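+      /* 3DSTATE_BINDING_TABLE_POINTERS_VS has subopcode 38, and the HS,
+       * DS, GS, and PS variants follow consecutively, so one packet
+       * template serves all five stages.
+       */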
+ for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
+ iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
+ ptr._3DCommandSubOpcode = 38 + stage;
+ ptr.PointertoVSBindingTable = binder->bt_offset[stage];
}
}
- assert(prog_data->binding_table.texture_start ==
- (ice->state.num_textures[stage] ? s : 0xd0d0d0d0));
-
- for (int i = 0; i < ice->state.num_textures[stage]; i++) {
- struct iris_sampler_view *view = ice->state.textures[stage][i];
- bt_map[s++] = use_sampler_view(batch, view);
+ for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
+ iris_populate_binding_table(ice, batch, stage);
}
-
-#if 0
- // XXX: not implemented yet
- assert(prog_data->binding_table.pull_constants_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.ubo_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.ssbo_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.image_start == 0xd0d0d0d0);
- assert(prog_data->binding_table.shader_time_start == 0xd0d0d0d0);
- //assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
- //assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
-#endif
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {