/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* All the generic state handling.. In case of CSO's that are specific
 * to the GPU version, when the bind and the delete are common they can
 * be in here.
 */

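/* Common pattern for the set_*() hooks below: stash the new state on
 * fd_context and OR in the matching FD_DIRTY_* bit (and/or a per-shader
 * FD_DIRTY_SHADER_* bit), which the draw/emit path consumes to decide
 * what needs to be re-emitted.
 */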
static void
fd_set_blend_color(struct pipe_context *pctx,
        const struct pipe_blend_color *blend_color)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->blend_color = *blend_color;
    ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
        const struct pipe_stencil_ref *stencil_ref)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->stencil_ref = *stencil_ref;
    ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

static void
fd_set_clip_state(struct pipe_context *pctx,
        const struct pipe_clip_state *clip)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->ucp = *clip;
    ctx->dirty |= FD_DIRTY_UCP;
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->sample_mask = (uint16_t)sample_mask;
    ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->min_samples = min_samples;
    ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index,
        const struct pipe_constant_buffer *cb)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

    util_copy_constant_buffer(&so->cb[index], cb);

    /* Note that the state tracker can unbind constant buffers by
     * passing NULL here.
     */
    if (unlikely(!cb)) {
        so->enabled_mask &= ~(1 << index);
        return;
    }

    so->enabled_mask |= 1 << index;
    ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
    ctx->dirty |= FD_DIRTY_CONST;
}

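/* Bind a range of SSBO slots.  A NULL 'buffers' unbinds the entire
 * [start, start+count) range; enabled_mask tracks which slots currently
 * have a buffer bound.
 */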
static void
fd_set_shader_buffers(struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
    const unsigned modified_bits = u_bit_consecutive(start, count);

    if (buffers) {
        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + start;
            struct pipe_shader_buffer *buf = &so->sb[n];

            if ((buf->buffer == buffers[i].buffer) &&
                    (buf->buffer_offset == buffers[i].buffer_offset) &&
                    (buf->buffer_size == buffers[i].buffer_size))
                continue;

            buf->buffer_offset = buffers[i].buffer_offset;
            buf->buffer_size = buffers[i].buffer_size;
            pipe_resource_reference(&buf->buffer, buffers[i].buffer);

            if (buf->buffer)
                so->enabled_mask |= BIT(n);
            else
                so->enabled_mask &= ~BIT(n);
        }
    } else {
        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + start;
            struct pipe_shader_buffer *buf = &so->sb[n];

            pipe_resource_reference(&buf->buffer, NULL);
        }

        so->enabled_mask &= ~modified_bits;
    }

    ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}

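/* Image views are compared field-wise first, so re-binding an identical
 * view is a no-op; util_copy_image_view() handles the resource reference
 * counting for views that actually changed.
 */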
static void
fd_set_shader_images(struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_image_view *images)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

    unsigned mask = 0;

    if (images) {
        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + start;
            struct pipe_image_view *buf = &so->si[n];

            if ((buf->resource == images[i].resource) &&
                    (buf->format == images[i].format) &&
                    (buf->access == images[i].access) &&
                    !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
                continue;

            mask |= BIT(n);
            util_copy_image_view(buf, &images[i]);

            if (buf->resource)
                so->enabled_mask |= BIT(n);
            else
                so->enabled_mask &= ~BIT(n);
        }
    } else {
        mask = (BIT(count) - 1) << start;

        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + start;
            struct pipe_image_view *img = &so->si[n];

            pipe_resource_reference(&img->resource, NULL);
        }

        so->enabled_mask &= ~mask;
    }

    ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
}

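/* Framebuffer changes are a natural flush point: with a reordering screen
 * the current batch is just dereferenced (a new one is created on demand),
 * otherwise it is flushed immediately.
 */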
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
        const struct pipe_framebuffer_state *framebuffer)
{
    struct fd_context *ctx = fd_context(pctx);
    struct pipe_framebuffer_state *cso;

    DBG("%ux%u, %u layers, %u samples",
            framebuffer->width, framebuffer->height,
            framebuffer->layers, framebuffer->samples);

    cso = &ctx->framebuffer;

    if (util_framebuffer_state_equal(cso, framebuffer))
        return;

    util_copy_framebuffer_state(cso, framebuffer);

    cso->samples = util_framebuffer_get_num_samples(cso);

    if (ctx->screen->reorder) {
        struct fd_batch *old_batch = NULL;

        fd_batch_reference(&old_batch, ctx->batch);

        if (likely(old_batch))
            fd_batch_set_stage(old_batch, FD_STAGE_NULL);

        fd_batch_reference(&ctx->batch, NULL);
        fd_context_all_dirty(ctx);

        if (old_batch && old_batch->blit && !old_batch->back_blit) {
            /* for blits, there is not really much point in hanging on
             * to the uncommitted batch (ie. you probably don't blit
             * multiple times to the same surface), so we might as
             * well go ahead and flush this one:
             */
            fd_batch_flush(old_batch);
        }

        fd_batch_reference(&old_batch, NULL);
    } else if (ctx->batch) {
        DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
                framebuffer->cbufs[0], framebuffer->zsbuf);
        fd_batch_flush(ctx->batch);
    }

    ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

    ctx->disabled_scissor.minx = 0;
    ctx->disabled_scissor.miny = 0;
    ctx->disabled_scissor.maxx = cso->width;
    ctx->disabled_scissor.maxy = cso->height;

    ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
        const struct pipe_poly_stipple *stipple)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->stipple = *stipple;
    ctx->dirty |= FD_DIRTY_STIPPLE;
}

static void
fd_set_scissor_states(struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_scissors,
        const struct pipe_scissor_state *scissor)
{
    struct fd_context *ctx = fd_context(pctx);

    ctx->scissor = *scissor;
    ctx->dirty |= FD_DIRTY_SCISSOR;
}

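/* In addition to stashing the viewport state, precompute the window-space
 * scissor rect implied by the viewport transform (cf. radeonsi's
 * si_get_scissor_from_viewport()).
 */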
static void
fd_set_viewport_states(struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_viewports,
        const struct pipe_viewport_state *viewport)
{
    struct fd_context *ctx = fd_context(pctx);
    struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
    float minx, miny, maxx, maxy;

    ctx->viewport = *viewport;

    /* see si_get_scissor_from_viewport(): */

    /* Convert (-1, -1) and (1, 1) from clip space into window space. */
    minx = -viewport->scale[0] + viewport->translate[0];
    miny = -viewport->scale[1] + viewport->translate[1];
    maxx = viewport->scale[0] + viewport->translate[0];
    maxy = viewport->scale[1] + viewport->translate[1];

    /* Handle inverted viewports. */
    if (minx > maxx) {
        swap(minx, maxx);
    }
    if (miny > maxy) {
        swap(miny, maxy);
    }

    debug_assert(miny >= 0);
    debug_assert(maxy >= 0);

    /* Convert to integer and round up the max bounds. */
    scissor->minx = minx;
    scissor->miny = miny;
    scissor->maxx = ceilf(maxx);
    scissor->maxy = ceilf(maxy);

    ctx->dirty |= FD_DIRTY_VIEWPORT;
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx,
        unsigned start_slot, unsigned count,
        const struct pipe_vertex_buffer *vb)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
    int i;

    /* on a2xx, pitch is encoded in the vtx fetch instruction, so
     * we need to mark VTXSTATE as dirty as well to trigger patching
     * and re-emitting the vtx shader:
     */
    if (ctx->screen->gpu_id < 300) {
        for (i = 0; i < count; i++) {
            bool new_enabled = vb && vb[i].buffer.resource;
            bool old_enabled = so->vb[i].buffer.resource != NULL;
            uint32_t new_stride = vb ? vb[i].stride : 0;
            uint32_t old_stride = so->vb[i].stride;
            if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
                ctx->dirty |= FD_DIRTY_VTXSTATE;
                break;
            }
        }
    }

    util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
    so->count = util_last_bit(so->enabled_mask);

    ctx->dirty |= FD_DIRTY_VTXBUF;
}

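/* Besides FD_DIRTY_BLEND, flag FD_DIRTY_BLEND_DUAL when RT0 toggles
 * dual-source blending, since that generally requires different
 * shader/render-target setup.
 */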
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
    struct fd_context *ctx = fd_context(pctx);
    struct pipe_blend_state *cso = hwcso;
    bool old_is_dual = ctx->blend ?
            ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
            false;
    bool new_is_dual = cso ?
            cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
            false;
    ctx->blend = hwcso;
    ctx->dirty |= FD_DIRTY_BLEND;
    if (old_is_dual != new_is_dual)
        ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
    FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
    struct fd_context *ctx = fd_context(pctx);
    struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

    ctx->rasterizer = hwcso;
    ctx->dirty |= FD_DIRTY_RASTERIZER;

    /* if scissor enable bit changed we need to mark scissor
     * state as dirty as well:
     * NOTE: we can do a shallow compare, since we only care
     * if it changed to/from &ctx->disabled_scissor
     */
    if (old_scissor != fd_context_get_scissor(ctx))
        ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
    FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->zsa = hwcso;
    ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
    FREE(hwcso);
}

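/* The vertex-elements CSO is just a copied array of pipe_vertex_element;
 * generation-specific translation happens later, at emit time.
 */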
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
        const struct pipe_vertex_element *elements)
{
    struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

    if (!so)
        return NULL;

    memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
    so->num_elements = num_elements;

    return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
    FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->vtx.vtx = hwcso;
    ctx->dirty |= FD_DIRTY_VTXSTATE;
}

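/* The target's range is added to the resource's valid_buffer_range up
 * front, since stream-out will write it on the GPU side.
 */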
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
        struct pipe_resource *prsc, unsigned buffer_offset,
        unsigned buffer_size)
{
    struct pipe_stream_output_target *target;
    struct fd_resource *rsc = fd_resource(prsc);

    target = CALLOC_STRUCT(pipe_stream_output_target);
    if (!target)
        return NULL;

    pipe_reference_init(&target->reference, 1);
    pipe_resource_reference(&target->buffer, prsc);

    target->context = pctx;
    target->buffer_offset = buffer_offset;
    target->buffer_size = buffer_size;

    assert(rsc->base.target == PIPE_BUFFER);
    util_range_add(&rsc->base, &rsc->valid_buffer_range,
            buffer_offset, buffer_offset + buffer_size);

    return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
        struct pipe_stream_output_target *target)
{
    pipe_resource_reference(&target->buffer, NULL);
    FREE(target);
}

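/* Per the gallium contract, offsets[i] == -1 means "append" (keep the
 * current offset for that target); any other value resets the internal
 * offset, which is what so->reset tracks per-target.
 */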
static void
fd_set_stream_output_targets(struct pipe_context *pctx,
        unsigned num_targets, struct pipe_stream_output_target **targets,
        const unsigned *offsets)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_streamout_stateobj *so = &ctx->streamout;
    unsigned i;

    debug_assert(num_targets <= ARRAY_SIZE(so->targets));

    for (i = 0; i < num_targets; i++) {
        boolean changed = targets[i] != so->targets[i];
        boolean reset = (offsets[i] != (unsigned)-1);

        so->reset |= (reset << i);

        if (!changed && !reset)
            continue;

        if (reset)
            so->offsets[i] = offsets[i];

        pipe_so_target_reference(&so->targets[i], targets[i]);
    }

    for (; i < so->num_targets; i++) {
        pipe_so_target_reference(&so->targets[i], NULL);
    }

    so->num_targets = num_targets;

    ctx->dirty |= FD_DIRTY_STREAMOUT;
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
    struct fd_context *ctx = fd_context(pctx);
    ctx->compute = state;
    ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx,
        unsigned start, unsigned count, struct pipe_surface **prscs)
{
    // TODO
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx,
        unsigned first, unsigned count, struct pipe_resource **prscs,
        uint32_t **handles)
{
    struct fd_context *ctx = fd_context(pctx);
    struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
    unsigned mask = 0;

    if (prscs) {
        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + first;

            mask |= BIT(n);

            pipe_resource_reference(&so->buf[n], prscs[i]);

            if (so->buf[n]) {
                struct fd_resource *rsc = fd_resource(so->buf[n]);
                uint64_t iova = fd_bo_get_iova(rsc->bo);
                // TODO need to scream if iova > 32b or fix gallium API..
                *handles[i] += iova;
            }

            if (prscs[i])
                so->enabled_mask |= BIT(n);
            else
                so->enabled_mask &= ~BIT(n);
        }
    } else {
        mask = (BIT(count) - 1) << first;

        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + first;
            if (so->buf[n]) {
                struct fd_resource *rsc = fd_resource(so->buf[n]);
                fd_bo_put_iova(rsc->bo);
            }
            pipe_resource_reference(&so->buf[n], NULL);
        }

        so->enabled_mask &= ~mask;
    }
}

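/* Hook up the generic state functions; the compute-related hooks are only
 * installed when the screen supports compute.
 */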
void
fd_state_init(struct pipe_context *pctx)
{
    pctx->set_blend_color = fd_set_blend_color;
    pctx->set_stencil_ref = fd_set_stencil_ref;
    pctx->set_clip_state = fd_set_clip_state;
    pctx->set_sample_mask = fd_set_sample_mask;
    pctx->set_min_samples = fd_set_min_samples;
    pctx->set_constant_buffer = fd_set_constant_buffer;
    pctx->set_shader_buffers = fd_set_shader_buffers;
    pctx->set_shader_images = fd_set_shader_images;
    pctx->set_framebuffer_state = fd_set_framebuffer_state;
    pctx->set_polygon_stipple = fd_set_polygon_stipple;
    pctx->set_scissor_states = fd_set_scissor_states;
    pctx->set_viewport_states = fd_set_viewport_states;

    pctx->set_vertex_buffers = fd_set_vertex_buffers;

    pctx->bind_blend_state = fd_blend_state_bind;
    pctx->delete_blend_state = fd_blend_state_delete;

    pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
    pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

    pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
    pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

    pctx->create_vertex_elements_state = fd_vertex_state_create;
    pctx->delete_vertex_elements_state = fd_vertex_state_delete;
    pctx->bind_vertex_elements_state = fd_vertex_state_bind;

    pctx->create_stream_output_target = fd_create_stream_output_target;
    pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
    pctx->set_stream_output_targets = fd_set_stream_output_targets;

    if (has_compute(fd_screen(pctx->screen))) {
        pctx->bind_compute_state = fd_bind_compute_state;
        pctx->set_compute_resources = fd_set_compute_resources;
        pctx->set_global_binding = fd_set_global_binding;
    }
}