/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"
/* All the generic state handling.. In case of CSO's that are specific
 * to the GPU version, when the bind and the delete are common they can
 * be in here.
 */
static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}
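
/* Most of the set_* hooks below follow the same pattern as above: copy the
 * new state into fd_context and flag the matching FD_DIRTY_* bit, which the
 * generation-specific emit code consumes on the next draw.
 */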
static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}
static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}
/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		return;
	}

	so->enabled_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;
}
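
/* Note: constant buffer state is tracked per shader stage (dirty_shader[])
 * in addition to the global dirty bit, presumably so that only stages whose
 * constants actually changed need to be re-emitted.
 */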
static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	unsigned mask;

	if (buffers) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			if (buf->buffer)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			pipe_resource_reference(&buf->buffer, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}
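
/* A NULL 'buffers' pointer unbinds the whole [start, start+count) range:
 * the else path above drops the resource references and clears the
 * corresponding bits in enabled_mask in one go.
 */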
static void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];
	unsigned mask;

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			util_copy_image_view(buf, &images[i]);

			if (buf->resource)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
}
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	DBG("%ux%u, %u layers, %u samples",
		framebuffer->width, framebuffer->height,
		framebuffer->layers, framebuffer->samples);

	cso = &ctx->framebuffer;

	if (util_framebuffer_state_equal(cso, framebuffer))
		return;

	util_copy_framebuffer_state(cso, framebuffer);

	cso->samples = util_framebuffer_get_num_samples(cso);

	if (ctx->screen->reorder) {
		struct fd_batch *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

		fd_batch_reference(&ctx->batch, NULL);
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch, false, false);
		}

		fd_batch_reference(&old_batch, NULL);
	} else {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch, false, false);
		util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
	}

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}
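
/* With batch reordering (ctx->screen->reorder) a framebuffer change just
 * detaches the current batch and marks all state dirty for the new one;
 * without reordering, the single batch is flushed immediately and its
 * framebuffer state updated in place.
 */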
static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}
static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->viewport = *viewport;
	ctx->dirty |= FD_DIRTY_VIEWPORT;
}
static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}
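
/* FD_DIRTY_BLEND_DUAL is flagged separately because toggling dual-source
 * blending presumably affects more than the blend CSO itself (e.g. how the
 * color outputs are set up), so dependent state needs to be re-emitted.
 */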
static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	/* if scissor enable bit changed we need to mark scissor
	 * state as dirty as well:
	 * NOTE: we can do a shallow compare, since we only care
	 * if it changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}
static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}
static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->valid_buffer_range,
			buffer_offset, buffer_offset + buffer_size);

	return target;
}
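
/* The util_range_add() above records the range that stream-out may write,
 * so the resource code can tell which part of the buffer holds (potentially)
 * valid data when it is later mapped or read back.
 */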
static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}
static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean append = (offsets[i] == (unsigned)-1);

		if (!changed && append)
			continue;

		if (!append)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}
static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}
static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	// TODO
}
/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
	unsigned mask;

	if (prscs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			pipe_resource_reference(&so->buf[n], prscs[i]);

			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				uint64_t iova = fd_bo_get_iova(rsc->bo);
				// TODO need to scream if iova > 32b or fix gallium API..
				*handles[i] += iova;
			}

			if (prscs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		mask = (BIT(count) - 1) << first;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;
			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				fd_bo_put_iova(rsc->bo);
			}
			pipe_resource_reference(&so->buf[n], NULL);
		}

		so->enabled_mask &= ~mask;
	}
}
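
/* Per the comment above, this hands the bo's GPU address back to clover
 * through the handles array; since the gallium interface only carries 32-bit
 * handles, addresses above 4G would be truncated (hence the TODO).
 */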
void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}