freedreno: Fix missing rsc->seqno updates
[mesa.git] src/gallium/drivers/freedreno/freedreno_state.c
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* All the generic state handling.. In case of CSO's that are specific
 * to the GPU version, when the bind and the delete are common they can
 * go in here.
 */

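/* The common pattern for the state setters below: stash the new state
 * on the context, then set the corresponding FD_DIRTY_* bit(s) in
 * ctx->dirty (and/or ctx->dirty_shader[]) so the generation-specific
 * backend knows to re-emit that state group on the next draw.
 */
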
static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->min_samples = min_samples;
	ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that gallium frontends can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		return;
	}

	so->enabled_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;

	fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
}

static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers,
		unsigned writable_bitmask)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	const unsigned modified_bits = u_bit_consecutive(start, count);

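	/* Every slot in [start, start+count) is being (re)bound: clear its
	 * old enabled/writable bits, then rebuild the writable mask from
	 * the new bitmask:
	 */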
	so->enabled_mask &= ~modified_bits;
	so->writable_mask &= ~modified_bits;
	so->writable_mask |= writable_bitmask << start;

	for (unsigned i = 0; i < count; i++) {
		unsigned n = i + start;
		struct pipe_shader_buffer *buf = &so->sb[n];

		if (buffers && buffers[i].buffer) {
			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);

			so->enabled_mask |= BIT(n);
		} else {
			pipe_resource_reference(&buf->buffer, NULL);
		}
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
	ctx->dirty |= FD_DIRTY_SSBO;
}

void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

	unsigned mask = 0;

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			mask |= BIT(n);
			util_copy_image_view(buf, &images[i]);

			if (buf->resource) {
				fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
				so->enabled_mask |= BIT(n);
			} else {
				so->enabled_mask &= ~BIT(n);
			}
		}
	} else {
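		/* a NULL images array unbinds the whole [start, start+count)
		 * range:
		 */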
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
	ctx->dirty |= FD_DIRTY_IMAGE;
}

static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	DBG("%ux%u, %u layers, %u samples",
			framebuffer->width, framebuffer->height,
			framebuffer->layers, framebuffer->samples);

	cso = &ctx->framebuffer;

	if (util_framebuffer_state_equal(cso, framebuffer))
		return;

	util_copy_framebuffer_state(cso, framebuffer);

	cso->samples = util_framebuffer_get_num_samples(cso);

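	/* With batch reordering, switching to a new framebuffer just means
	 * switching to a new (empty) batch; the old one is left pending and
	 * flushed later once something actually depends on its result:
	 */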
	if (ctx->screen->reorder) {
		struct fd_batch *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

		fd_batch_reference(&ctx->batch, NULL);
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch);
		}

		fd_batch_reference(&old_batch, NULL);
	} else if (ctx->batch) {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch);
	}

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

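	/* The "disabled scissor" rect tracks the framebuffer dimensions, so
	 * draws with the rasterizer scissor test disabled still get clipped
	 * to the render target:
	 */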
	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}

static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

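	/* we only support a single scissor rect, so start_slot and
	 * num_scissors are effectively ignored:
	 */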
	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
	float minx, miny, maxx, maxy;

	ctx->viewport = *viewport;

	/* see si_get_scissor_from_viewport(): */

	/* Convert (-1, -1) and (1, 1) from clip space into window space. */
	minx = -viewport->scale[0] + viewport->translate[0];
	miny = -viewport->scale[1] + viewport->translate[1];
	maxx = viewport->scale[0] + viewport->translate[0];
	maxy = viewport->scale[1] + viewport->translate[1];

	/* Handle inverted viewports. */
	if (minx > maxx) {
		swap(minx, maxx);
	}
	if (miny > maxy) {
		swap(miny, maxy);
	}

	debug_assert(miny >= 0);
	debug_assert(maxy >= 0);

	/* Convert to integer and round up the max bounds. */
	scissor->minx = minx;
	scissor->miny = miny;
	scissor->maxx = ceilf(maxx);
	scissor->maxy = ceilf(maxy);

	ctx->dirty |= FD_DIRTY_VIEWPORT;
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

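	/* the u_helpers util takes care of the actual (un)binding and
	 * reference counting, and keeps the enabled-slots mask up to date:
	 */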
	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	if (!vb)
		return;

	ctx->dirty |= FD_DIRTY_VTXBUF;

	for (unsigned i = 0; i < count; i++) {
		assert(!vb[i].is_user_buffer);
		fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
	}
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
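	/* track whether dual-source blending is toggling, since that gets
	 * its own dirty bit (it can require different program state in the
	 * backend):
	 */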
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
	bool discard = ctx->rasterizer && ctx->rasterizer->rasterizer_discard;

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	if (ctx->rasterizer && ctx->rasterizer->scissor) {
		ctx->current_scissor = &ctx->scissor;
	} else {
		ctx->current_scissor = &ctx->disabled_scissor;
	}

	/* if the scissor enable bit changed we need to mark the scissor
	 * state as dirty as well.
	 * NOTE: we can do a shallow compare, since we only care
	 * whether it changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;

	if (ctx->rasterizer && (discard != ctx->rasterizer->rasterizer_discard))
		ctx->dirty |= FD_DIRTY_RASTERIZER_DISCARD;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

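	/* streamout will write into this range of the buffer, so mark it as
	 * (potentially) containing valid data:
	 */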
	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->base, &rsc->valid_buffer_range,
			buffer_offset, buffer_offset + buffer_size);

	return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
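		/* an offset of (unsigned)-1 means "append to the buffer at its
		 * current offset", anything else resets our internal offset
		 * counter:
		 */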
		boolean reset = (offsets[i] != (unsigned)-1);

		so->reset |= (reset << i);

		if (!changed && !reset)
			continue;

		if (reset)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	// TODO
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
	unsigned mask = 0;

	if (prscs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			mask |= BIT(n);

			pipe_resource_reference(&so->buf[n], prscs[i]);

			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				uint64_t iova = fd_bo_get_iova(rsc->bo);
				// TODO need to scream if iova > 32b or fix gallium API..
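				/* clover pre-loads handles[i] with the offset within
				 * the buffer, so add the base address rather than
				 * storing it (hence the +=):
				 */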
				*handles[i] += iova;
			}

			if (prscs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << first;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;
			pipe_resource_reference(&so->buf[n], NULL);
		}

		so->enabled_mask &= ~mask;
	}
}

void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_min_samples = fd_set_min_samples;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

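	/* generation-specific code may have already installed its own
	 * vertex-elements create hook, so don't clobber it:
	 */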
	if (!pctx->create_vertex_elements_state)
		pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}