/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"
/* All the generic state handling.. In case of CSO's that are specific
 * to the GPU version, when the bind and the delete are common they can
 * be in here.
 */
static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}
static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}
static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}
static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->min_samples = min_samples;
	ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}
/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous value.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		return;
	}

	so->enabled_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;
}
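/* SSBO bindings are tracked per shader stage; each bound slot gets a bit in
 * enabled_mask so state emission can skip unused slots.
 */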
static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers,
		unsigned writable_bitmask)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];

	if (buffers) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			if (buf->buffer)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		unsigned mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			pipe_resource_reference(&buf->buffer, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}
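/* Shader image bindings, tracked the same way as SSBOs: one bit per bound
 * slot in enabled_mask.
 */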
static void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			util_copy_image_view(buf, &images[i]);

			if (buf->resource)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		unsigned mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
}
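/* Binding a new framebuffer ends the current batch: with batch reordering
 * enabled the old batch is kept around and only flushed early for blits,
 * otherwise it is flushed immediately.
 */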
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	DBG("%ux%u, %u layers, %u samples",
			framebuffer->width, framebuffer->height,
			framebuffer->layers, framebuffer->samples);

	cso = &ctx->framebuffer;

	if (util_framebuffer_state_equal(cso, framebuffer))
		return;

	util_copy_framebuffer_state(cso, framebuffer);

	cso->samples = util_framebuffer_get_num_samples(cso);

	if (ctx->screen->reorder) {
		struct fd_batch *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

		fd_batch_reference(&ctx->batch, NULL);
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch);
		}

		fd_batch_reference(&old_batch, NULL);
	} else if (ctx->batch) {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch);
	}

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}
static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
	float minx, miny, maxx, maxy;

	ctx->viewport = *viewport;

	/* see si_get_scissor_from_viewport(): */

	/* Convert (-1, -1) and (1, 1) from clip space into window space. */
	minx = -viewport->scale[0] + viewport->translate[0];
	miny = -viewport->scale[1] + viewport->translate[1];
	maxx = viewport->scale[0] + viewport->translate[0];
	maxy = viewport->scale[1] + viewport->translate[1];

	/* Handle inverted viewports. */
	if (minx > maxx) {
		float tmp = minx;
		minx = maxx;
		maxx = tmp;
	}
	if (miny > maxy) {
		float tmp = miny;
		miny = maxy;
		maxy = tmp;
	}

	debug_assert(miny >= 0);
	debug_assert(maxy >= 0);

	/* Convert to integer and round up the max bounds. */
	scissor->minx = minx;
	scissor->miny = miny;
	scissor->maxx = ceilf(maxx);
	scissor->maxy = ceilf(maxy);

	ctx->dirty |= FD_DIRTY_VIEWPORT;
}
static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}
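/* Bind blend CSO.  Transitions to/from dual-source blending are tracked
 * separately (FD_DIRTY_BLEND_DUAL) so dependent state can be re-emitted.
 */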
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}
static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	/* if scissor enable bit changed we need to mark scissor
	 * state as dirty as well:
	 * NOTE: we can do a shallow compare, since we only care
	 * if it changed to/from &ctx->disable_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}
static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}
static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}
static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}
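/* Stream-output (transform feedback) targets just wrap a buffer resource
 * plus an offset/size range within it.
 */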
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->base, &rsc->valid_buffer_range,
			buffer_offset, buffer_offset + buffer_size);

	return target;
}
static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}
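/* An offset of -1 means append, ie. keep the current internal offset for
 * that target; any other value resets it.
 */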
static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean reset = (offsets[i] != (unsigned)-1);

		so->reset |= (reset << i);

		if (!changed && !reset)
			continue;

		so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}
static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}
static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	/* TODO */
}
/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_global_bindings_stateobj *so = &ctx->global_bindings;

	if (prscs) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			pipe_resource_reference(&so->buf[n], prscs[i]);

			if (so->buf[n]) {
				struct fd_resource *rsc = fd_resource(so->buf[n]);
				uint64_t iova = fd_bo_get_iova(rsc->bo);
				// TODO need to scream if iova > 32b or fix gallium API..
				*handles[i] += iova;
			}

			if (prscs[i])
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
		unsigned mask = (BIT(count) - 1) << first;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + first;

			struct fd_resource *rsc = fd_resource(so->buf[n]);
			fd_bo_put_iova(rsc->bo);

			pipe_resource_reference(&so->buf[n], NULL);
		}

		so->enabled_mask &= ~mask;
	}
}
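/* Hook up the generic state handlers.  Generation-specific code can override
 * individual entrypoints after calling this.
 */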
void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_min_samples = fd_set_min_samples;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}