freedreno: simplify pctx->clear()
[mesa.git] src/gallium/drivers/freedreno/freedreno_state.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* All the generic state handling.  Even CSOs that are specific to the GPU
 * generation can go in here when their bind and delete hooks are common
 * across generations.
 */

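/* Stash the blend color on the context and flag it dirty for re-emit. */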
static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

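/* Stash the stencil reference values and flag them dirty. */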
static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

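/* Stash the user clip planes (UCP) and flag them dirty. */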
static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}

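/* Only the low 16 bits of the sample mask are kept. */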
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		return;
	}

	so->enabled_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;
}

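/* Bind a range of SSBOs for a shader stage.  A NULL buffers pointer unbinds
 * the entire range; rebinding an identical buffer/offset/size is skipped.
 */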
static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	unsigned mask = 0;

	if (buffers) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			mask |= BIT(n);

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			if (buf->buffer)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			pipe_resource_reference(&buf->buffer, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}

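/* Bind a range of shader images.  A NULL images pointer unbinds the entire
 * range; rebinding an identical image view is skipped.
 */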
static void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

	unsigned mask = 0;

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			mask |= BIT(n);
			util_copy_image_view(buf, &images[i]);

			if (buf->resource)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
}

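/* Switching render targets ends the current batch: with reordering enabled
 * we drop our batch reference and let a new batch be created on demand (and
 * mark all state dirty, since the new batch needs a full re-emit); otherwise
 * the current batch is flushed.
 */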
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	if (ctx->screen->reorder) {
		struct fd_batch *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

		fd_batch_reference(&ctx->batch, NULL);
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch, false, false);
		}

		fd_batch_reference(&old_batch, NULL);
	} else {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch, false, false);
	}

	cso = &ctx->framebuffer;

	util_copy_framebuffer_state(cso, framebuffer);

	cso->samples = util_framebuffer_get_num_samples(cso);

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}

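/* Only a single scissor rect is supported, so start_slot and num_scissors
 * are ignored.
 */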
static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->viewport = *viewport;
	ctx->dirty |= FD_DIRTY_VIEWPORT;
}

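/* Bind a range of vertex buffers and update the enabled-slots mask. */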
static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}

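/* Binding a new blend CSO also tracks whether dual-source blending got
 * enabled or disabled, since that flips additional state (FD_DIRTY_BLEND_DUAL).
 */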
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	/* if the scissor enable bit changed we need to mark the scissor
	 * state as dirty as well:
	 * NOTE: we can do a shallow compare, since we only care
	 * whether it changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

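/* The vertex elements CSO is generic across generations: just a copy of
 * the pipe_vertex_element array.
 */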
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}

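/* Stream-output targets can only be backed by buffer resources; the range
 * that will be written is marked valid up front.
 */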
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->valid_buffer_range,
			buffer_offset, buffer_offset + buffer_size);

	return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}

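/* Bind stream-output targets.  An offset of (unsigned)-1 means "append",
 * i.e. keep the target's current offset rather than resetting it.
 */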
static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean append = (offsets[i] == (unsigned)-1);

		if (!changed && append)
			continue;

		if (!append)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	// TODO
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
	unsigned mask = 0;

	if (prscs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			mask |= BIT(n);

			pipe_resource_reference(&so->buf[n], prscs[i]);

			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				uint64_t iova = fd_bo_get_iova(rsc->bo);
				// TODO need to scream if iova > 32b or fix gallium API..
				*handles[i] += iova;
			}

			if (prscs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << first;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;
			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				fd_bo_put_iova(rsc->bo);
			}
			pipe_resource_reference(&so->buf[n], NULL);
		}

		so->enabled_mask &= ~mask;
	}
}

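/* Plug the generic state handlers into the context. */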
void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}