pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
}
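+ /* Drop the references held by shader buffer bindings (all six stages). */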
+ for (s = 0; s < 6; ++s)
+ for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
+ pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);
+
for (i = 0; i < nvc0->num_tfbbufs; ++i)
pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);
if (bind & (PIPE_BIND_VERTEX_BUFFER |
PIPE_BIND_INDEX_BUFFER |
PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
PIPE_BIND_STREAM_OUTPUT |
PIPE_BIND_COMMAND_ARGS_BUFFER |
PIPE_BIND_SAMPLER_VIEW)) {
}
}
}
+
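+ /* Only the five graphics stages are scanned here; their bindings live in
+  * bufctx_3d. */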
+ for (s = 0; s < 5; ++s) {
+ for (i = 0; i < NVC0_MAX_BUFFERS; ++i) {
+ if (nvc0->buffers[s][i].buffer == res) {
+ nvc0->buffers_dirty[s] |= 1 << i;
+ nvc0->dirty |= NVC0_NEW_BUFFERS;
+ nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_BUF);
+ if (!--ref)
+ return ref;
+ }
+ }
+ }
}
return ref;
#define NVC0_NEW_SURFACES (1 << 23)
#define NVC0_NEW_MIN_SAMPLES (1 << 24)
#define NVC0_NEW_TESSFACTOR (1 << 25)
+#define NVC0_NEW_BUFFERS (1 << 26)
#define NVC0_NEW_CP_PROGRAM (1 << 0)
#define NVC0_NEW_CP_SURFACES (1 << 1)
#define NVC0_BIND_CB(s, i) (164 + 16 * (s) + (i))
#define NVC0_BIND_TFB 244
#define NVC0_BIND_SUF 245
-#define NVC0_BIND_SCREEN 246
-#define NVC0_BIND_TLS 247
-#define NVC0_BIND_3D_COUNT 248
+#define NVC0_BIND_BUF 246
+#define NVC0_BIND_SCREEN 247
+#define NVC0_BIND_TLS 248
+#define NVC0_BIND_3D_COUNT 249
/* compute bufctx (during launch_grid) */
#define NVC0_BIND_CP_CB(i) ( 0 + (i))
struct nvc0_blitctx *blit;
+ /* NOTE: some of these surfaces may reference buffers */
struct pipe_surface *surfaces[2][NVC0_MAX_SURFACE_SLOTS];
uint16_t surfaces_dirty[2];
uint16_t surfaces_valid[2];
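+ /* Bound shader buffers, per stage (5 graphics stages plus compute). */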
+ struct pipe_shader_buffer buffers[6][NVC0_MAX_BUFFERS];
+ uint32_t buffers_dirty[6];
+ uint32_t buffers_valid[6];
+
struct util_dynarray global_residents;
};
push->rsvd_kick = 5;
screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
PIPE_BIND_COMMAND_ARGS_BUFFER;
screen->base.sysmem_bindings |=
PUSH_DATA (push, screen->tls->size);
BEGIN_NVC0(push, NVC0_3D(WARP_TEMP_ALLOC), 1);
PUSH_DATA (push, 0);
+ /* Reduce likelihood of collision with real buffers by placing the hole at
+ * the top of the 4G area. This will have to be dealt with for real
+ * eventually by blocking off that area from the VM.
+ */
BEGIN_NVC0(push, NVC0_3D(LOCAL_BASE), 1);
- PUSH_DATA (push, 0);
+ PUSH_DATA (push, 0xff << 24);
if (screen->eng3d->oclass < GM107_3D_CLASS) {
ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 1 << 17, 1 << 20, NULL,
unsigned start_slot, unsigned count,
struct pipe_image_view **views)
{
-#if 0
- nvc0_bind_surfaces_range(nvc0_context(pipe), 0, start, nr, views);
- nvc0_context(pipe)->dirty |= NVC0_NEW_SURFACES;
-#endif
+}
+
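+/* Bind (or unbind) a range of shader buffers for stage 't', updating the
+ * valid/dirty masks and resetting the bufctx bind point so the list of
+ * referenced BOs gets rebuilt on the next validation.
+ */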
+static void
+nvc0_bind_buffers_range(struct nvc0_context *nvc0, const unsigned t,
+ unsigned start, unsigned nr,
+ struct pipe_shader_buffer *pbuffers)
+{
+ const unsigned end = start + nr;
+ const unsigned mask = ((1 << nr) - 1) << start;
+ unsigned i;
+
+ assert(t < 5);
+
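+ /* A NULL pbuffers array unbinds the whole [start, start + nr) range. */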
+ if (pbuffers) {
+ for (i = start; i < end; ++i) {
+ const unsigned p = i - start;
+ if (pbuffers[p].buffer)
+ nvc0->buffers_valid[t] |= (1 << i);
+ else
+ nvc0->buffers_valid[t] &= ~(1 << i);
+ nvc0->buffers[t][i].buffer_offset = pbuffers[p].buffer_offset;
+ nvc0->buffers[t][i].buffer_size = pbuffers[p].buffer_size;
+ pipe_resource_reference(&nvc0->buffers[t][i].buffer, pbuffers[p].buffer);
+ }
+ } else {
+ for (i = start; i < end; ++i)
+ pipe_resource_reference(&nvc0->buffers[t][i].buffer, NULL);
+ nvc0->buffers_valid[t] &= ~mask;
+ }
+ nvc0->buffers_dirty[t] |= mask;
+
+ nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_BUF);
+}
+
+static void
+nvc0_set_shader_buffers(struct pipe_context *pipe,
+ unsigned shader,
+ unsigned start, unsigned nr,
+ struct pipe_shader_buffer *buffers)
+{
+ const unsigned s = nvc0_shader_stage(shader);
+ nvc0_bind_buffers_range(nvc0_context(pipe), s, start, nr, buffers);
+ nvc0_context(pipe)->dirty |= NVC0_NEW_BUFFERS;
}
static inline void
pipe->set_global_binding = nvc0_set_global_bindings;
pipe->set_compute_resources = nvc0_set_compute_resources;
pipe->set_shader_images = nvc0_set_shader_images;
+ pipe->set_shader_buffers = nvc0_set_shader_buffers;
nvc0->sample_mask = ~0;
nvc0->min_samples = 1;
}
}
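+/* Each buffer slot is described by four 32-bit words, uploaded below at
+ * driver constbuf offset 512: address lo/hi (GPU VA plus buffer_offset),
+ * size in bytes, and a zero pad word.
+ */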
+static void
+nvc0_validate_buffers(struct nvc0_context *nvc0)
+{
+ struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+ int i, s;
+
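+ /* Re-upload the buffer descriptor table for each graphics stage into its
+  * slice of the driver constant buffer. */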
+ for (s = 0; s < 5; s++) {
+ BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
+ PUSH_DATA (push, 1024);
+ PUSH_DATAh(push, nvc0->screen->uniform_bo->offset + (5 << 16) + (s << 10));
+ PUSH_DATA (push, nvc0->screen->uniform_bo->offset + (5 << 16) + (s << 10));
+ BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 4 * NVC0_MAX_BUFFERS);
+ PUSH_DATA (push, 512);
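+ /* Four words per buffer slot: address lo, address hi, size, pad. */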
+ for (i = 0; i < NVC0_MAX_BUFFERS; i++) {
+ if (nvc0->buffers[s][i].buffer) {
+ struct nv04_resource *res =
+ nv04_resource(nvc0->buffers[s][i].buffer);
+ PUSH_DATA (push, res->address + nvc0->buffers[s][i].buffer_offset);
+ PUSH_DATAh(push, res->address + nvc0->buffers[s][i].buffer_offset);
+ PUSH_DATA (push, nvc0->buffers[s][i].buffer_size);
+ PUSH_DATA (push, 0);
+ BCTX_REFN(nvc0->bufctx_3d, BUF, res, RDWR);
+ } else {
+ PUSH_DATA (push, 0);
+ PUSH_DATA (push, 0);
+ PUSH_DATA (push, 0);
+ PUSH_DATA (push, 0);
+ }
+ }
+ }
+}
+
static void
nvc0_validate_sample_mask(struct nvc0_context *nvc0)
{
{ nve4_set_tex_handles, NVC0_NEW_TEXTURES | NVC0_NEW_SAMPLERS },
{ nvc0_vertex_arrays_validate, NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS },
{ nvc0_validate_surfaces, NVC0_NEW_SURFACES },
+ { nvc0_validate_buffers, NVC0_NEW_BUFFERS },
{ nvc0_idxbuf_validate, NVC0_NEW_IDXBUF },
{ nvc0_tfb_validate, NVC0_NEW_TFB_TARGETS | NVC0_NEW_GMTYPROG },
{ nvc0_validate_min_samples, NVC0_NEW_MIN_SAMPLES },