vc4: Move the render job state into a separate structure.
[mesa.git] src/gallium/drivers/vc4/vc4_state.c
/*
 * Copyright © 2014 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "vc4_context.h"

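/* Helper for CSOs that vc4 keeps as a plain copy of the gallium state, with
 * nothing precomputed at create time (e.g. blend state, which is baked into
 * the shaders instead).
 */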
static void *
vc4_generic_cso_state_create(const void *src, uint32_t size)
{
        void *dst = calloc(1, size);
        if (!dst)
                return NULL;
        memcpy(dst, src, size);
        return dst;
}

static void
vc4_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
vc4_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend_color.f = *blend_color;
        for (int i = 0; i < 4; i++)
                vc4->blend_color.ub[i] = float_to_ubyte(blend_color->color[i]);
        vc4->dirty |= VC4_DIRTY_BLEND_COLOR;
}

static void
vc4_set_stencil_ref(struct pipe_context *pctx,
                    const struct pipe_stencil_ref *stencil_ref)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stencil_ref = *stencil_ref;
        vc4->dirty |= VC4_DIRTY_STENCIL_REF;
}

static void
vc4_set_clip_state(struct pipe_context *pctx,
                   const struct pipe_clip_state *clip)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->clip = *clip;
        vc4->dirty |= VC4_DIRTY_CLIP;
}

static void
vc4_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->sample_mask = sample_mask & ((1 << VC4_MAX_SAMPLES) - 1);
        vc4->dirty |= VC4_DIRTY_SAMPLE_MASK;
}

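/* Converts a float to the hardware's 1-8-7 (sign/exponent/mantissa) "187
 * half-float" format by keeping the top 16 bits of the IEEE 754
 * single-precision encoding.
 */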
static uint16_t
float_to_187_half(float f)
{
        return fui(f) >> 16;
}

static void *
vc4_create_rasterizer_state(struct pipe_context *pctx,
                            const struct pipe_rasterizer_state *cso)
{
        struct vc4_rasterizer_state *so;

        so = CALLOC_STRUCT(vc4_rasterizer_state);
        if (!so)
                return NULL;

        so->base = *cso;

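        /* The hardware enables front-facing and back-facing primitives
         * separately, so translate gallium's cull mask into per-face enable
         * bits.
         */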
        if (!(cso->cull_face & PIPE_FACE_FRONT))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_FRONT;
        if (!(cso->cull_face & PIPE_FACE_BACK))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_BACK;

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        so->point_size = MAX2(cso->point_size, .125f);

        if (cso->front_ccw)
                so->config_bits[0] |= VC4_CONFIG_BITS_CW_PRIMITIVES;

        if (cso->offset_tri) {
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET;

                so->offset_units = float_to_187_half(cso->offset_units);
                so->offset_factor = float_to_187_half(cso->offset_scale);
        }

        if (cso->multisample)
                so->config_bits[0] |= VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X;

        return so;
}

/* Blend state is baked into shaders. */
static void *
vc4_create_blend_state(struct pipe_context *pctx,
                       const struct pipe_blend_state *cso)
{
        return vc4_generic_cso_state_create(cso, sizeof(*cso));
}

/**
 * The TLB_STENCIL_SETUP data has a little bitfield for common writemask
 * values, so you don't have to do a separate writemask setup.
 */
static uint8_t
tlb_stencil_setup_writemask(uint8_t mask)
{
        switch (mask) {
        case 0x1: return 0;
        case 0x3: return 1;
        case 0xf: return 2;
        case 0xff: return 3;
        default: return 0xff;
        }
}

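/* Packs one face's stencil test state into the TLB_STENCIL_SETUP word that is
 * emitted through the fragment shader's uniform stream.
 */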
static uint32_t
tlb_stencil_setup_bits(const struct pipe_stencil_state *state,
                       uint8_t writemask_bits)
{
        static const uint8_t op_map[] = {
                [PIPE_STENCIL_OP_ZERO] = 0,
                [PIPE_STENCIL_OP_KEEP] = 1,
                [PIPE_STENCIL_OP_REPLACE] = 2,
                [PIPE_STENCIL_OP_INCR] = 3,
                [PIPE_STENCIL_OP_DECR] = 4,
                [PIPE_STENCIL_OP_INVERT] = 5,
                [PIPE_STENCIL_OP_INCR_WRAP] = 6,
                [PIPE_STENCIL_OP_DECR_WRAP] = 7,
        };
        uint32_t bits = 0;

        if (writemask_bits != 0xff)
                bits |= writemask_bits << 28;
        bits |= op_map[state->zfail_op] << 25;
        bits |= op_map[state->zpass_op] << 22;
        bits |= op_map[state->fail_op] << 19;
        bits |= state->func << 16;
        /* Ref is filled in at uniform upload time */
        bits |= state->valuemask << 0;

        return bits;
}

static void *
vc4_create_depth_stencil_alpha_state(struct pipe_context *pctx,
                                     const struct pipe_depth_stencil_alpha_state *cso)
{
        struct vc4_depth_stencil_alpha_state *so;

        so = CALLOC_STRUCT(vc4_depth_stencil_alpha_state);
        if (!so)
                return NULL;

        so->base = *cso;

        /* We always keep the early Z state correct, since a later state using
         * early Z may want it.
         */
        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z_UPDATE;

        if (cso->depth.enabled) {
                if (cso->depth.writemask) {
                        so->config_bits[1] |= VC4_CONFIG_BITS_Z_UPDATE;
                }
                so->config_bits[1] |= (cso->depth.func <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);

                /* We only handle early Z in the < direction because otherwise
                 * we'd have to runtime guess which direction to set in the
                 * render config.
                 */
                if ((cso->depth.func == PIPE_FUNC_LESS ||
                     cso->depth.func == PIPE_FUNC_LEQUAL) &&
                    (!cso->stencil[0].enabled ||
                     (cso->stencil[0].zfail_op == PIPE_STENCIL_OP_KEEP &&
                      (!cso->stencil[1].enabled ||
                       cso->stencil[1].zfail_op == PIPE_STENCIL_OP_KEEP)))) {
                        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z;
                }
        } else {
                so->config_bits[1] |= (PIPE_FUNC_ALWAYS <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);
        }

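        /* The top two bits of each stencil setup word select which faces it
         * applies to: 1 = front, 2 = back, 3 = both.
         */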
        if (cso->stencil[0].enabled) {
                const struct pipe_stencil_state *front = &cso->stencil[0];
                const struct pipe_stencil_state *back = &cso->stencil[1];

                uint8_t front_writemask_bits =
                        tlb_stencil_setup_writemask(front->writemask);
                uint8_t back_writemask = front->writemask;
                uint8_t back_writemask_bits = front_writemask_bits;

                so->stencil_uniforms[0] =
                        tlb_stencil_setup_bits(front, front_writemask_bits);
                if (back->enabled) {
                        back_writemask = back->writemask;
                        back_writemask_bits =
                                tlb_stencil_setup_writemask(back->writemask);

                        so->stencil_uniforms[0] |= (1 << 30);
                        so->stencil_uniforms[1] =
                                tlb_stencil_setup_bits(back, back_writemask_bits);
                        so->stencil_uniforms[1] |= (2 << 30);
                } else {
                        so->stencil_uniforms[0] |= (3 << 30);
                }

                if (front_writemask_bits == 0xff ||
                    back_writemask_bits == 0xff) {
                        so->stencil_uniforms[2] = (front->writemask |
                                                   (back_writemask << 8));
                }
        }

        return so;
}

static void
vc4_set_polygon_stipple(struct pipe_context *pctx,
                        const struct pipe_poly_stipple *stipple)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stipple = *stipple;
        vc4->dirty |= VC4_DIRTY_STIPPLE;
}

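/* Only a single viewport/scissor slot is exposed, so start_slot and the
 * count are ignored here and in vc4_set_viewport_states() below.
 */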
static void
vc4_set_scissor_states(struct pipe_context *pctx,
                       unsigned start_slot,
                       unsigned num_scissors,
                       const struct pipe_scissor_state *scissor)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4->scissor = *scissor;
        vc4->dirty |= VC4_DIRTY_SCISSOR;
}

static void
vc4_set_viewport_states(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_viewports,
                        const struct pipe_viewport_state *viewport)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->viewport = *viewport;
        vc4->dirty |= VC4_DIRTY_VIEWPORT;
}

static void
vc4_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;

        util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
                                     start_slot, count);
        so->count = util_last_bit(so->enabled_mask);

        vc4->dirty |= VC4_DIRTY_VTXBUF;
}

static void
vc4_set_index_buffer(struct pipe_context *pctx,
                     const struct pipe_index_buffer *ib)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (ib) {
                pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
                vc4->indexbuf.index_size = ib->index_size;
                vc4->indexbuf.offset = ib->offset;
                vc4->indexbuf.user_buffer = ib->user_buffer;
        } else {
                pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
        }

        vc4->dirty |= VC4_DIRTY_INDEXBUF;
}

static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend = hwcso;
        vc4->dirty |= VC4_DIRTY_BLEND;
}

static void
vc4_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_rasterizer_state *rast = hwcso;

        if (vc4->rasterizer && rast &&
            vc4->rasterizer->base.flatshade != rast->base.flatshade) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        vc4->rasterizer = hwcso;
        vc4->dirty |= VC4_DIRTY_RASTERIZER;
}

static void
vc4_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->zsa = hwcso;
        vc4->dirty |= VC4_DIRTY_ZSA;
}

static void *
vc4_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
        struct vc4_vertex_stateobj *so = CALLOC_STRUCT(vc4_vertex_stateobj);

        if (!so)
                return NULL;

        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
        so->num_elements = num_elements;

        return so;
}

static void
vc4_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->vtx = hwcso;
        vc4->dirty |= VC4_DIRTY_VTXSTATE;
}

static void
vc4_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_constbuf_stateobj *so = &vc4->constbuf[shader];

        assert(index == 0);

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!cb)) {
                so->enabled_mask &= ~(1 << index);
                so->dirty_mask &= ~(1 << index);
                return;
        }

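        /* Only user constant buffers are expected (note the assert below);
         * vc4 reads the uniform contents itself at draw time rather than
         * referencing a GPU buffer.
         */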
        assert(!cb->buffer);
        so->cb[index].buffer_offset = cb->buffer_offset;
        so->cb[index].buffer_size = cb->buffer_size;
        so->cb[index].user_buffer = cb->user_buffer;

        so->enabled_mask |= 1 << index;
        so->dirty_mask |= 1 << index;
        vc4->dirty |= VC4_DIRTY_CONSTBUF;
}

static void
vc4_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4->job;
        struct pipe_framebuffer_state *cso = &vc4->framebuffer;
        unsigned i;

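        /* Changing the framebuffer ends the current render job, so flush it
         * before updating the bound surfaces.
         */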
        vc4_flush(pctx);

        for (i = 0; i < framebuffer->nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
        for (; i < vc4->framebuffer.nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], NULL);

        cso->nr_cbufs = framebuffer->nr_cbufs;

        pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

        cso->width = framebuffer->width;
        cso->height = framebuffer->height;

        /* If we're binding to uninitialized buffers, there is no need to load
         * their contents before drawing.
         */
        if (cso->cbufs[0]) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                if (!rsc->writes)
                        job->cleared |= PIPE_CLEAR_COLOR0;
        }

        if (cso->zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                if (!rsc->writes)
                        job->cleared |= PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL;
        }

        /* Nonzero texture mipmap levels are laid out as if they were in
         * power-of-two-sized spaces. The renderbuffer config infers its
         * stride from the width parameter, so we need to configure our
         * framebuffer. Note that if the z/color buffers were mismatched
         * sizes, we wouldn't be able to do this.
         */
        if (cso->cbufs[0] && cso->cbufs[0]->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                cso->width =
                        (rsc->slices[cso->cbufs[0]->u.tex.level].stride /
                         rsc->cpp);
        } else if (cso->zsbuf && cso->zsbuf->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                cso->width =
                        (rsc->slices[cso->zsbuf->u.tex.level].stride /
                         rsc->cpp);
        }

        job->msaa = false;
        if (cso->cbufs[0])
                job->msaa = cso->cbufs[0]->texture->nr_samples > 1;
        else if (cso->zsbuf)
                job->msaa = cso->zsbuf->texture->nr_samples > 1;

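        /* The tile buffer is a fixed amount of on-chip memory, so 4x MSAA
         * tiles cover half as many pixels in each dimension as non-MSAA
         * tiles.
         */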
        if (job->msaa) {
                job->tile_width = 32;
                job->tile_height = 32;
        } else {
                job->tile_width = 64;
                job->tile_height = 64;
        }
        job->draw_tiles_x = DIV_ROUND_UP(cso->width, job->tile_width);
        job->draw_tiles_y = DIV_ROUND_UP(cso->height, job->tile_height);

        vc4->dirty |= VC4_DIRTY_FRAMEBUFFER;
}

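/* Returns the texture state for the given shader stage and flags it dirty,
 * since the callers are about to modify it.
 */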
static struct vc4_texture_stateobj *
vc4_get_stage_tex(struct vc4_context *vc4, enum pipe_shader_type shader)
{
        switch (shader) {
        case PIPE_SHADER_FRAGMENT:
                vc4->dirty |= VC4_DIRTY_FRAGTEX;
                return &vc4->fragtex;
                break;
        case PIPE_SHADER_VERTEX:
                vc4->dirty |= VC4_DIRTY_VERTTEX;
                return &vc4->verttex;
                break;
        default:
                fprintf(stderr, "Unknown shader target %d\n", shader);
                abort();
        }
}

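/* Translates a gallium wrap mode to the hardware's wrap field. There is no
 * native GL_CLAMP, so approximate it with clamp-to-edge when filtering is
 * nearest and clamp-to-border when it is linear.
 */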
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

static void *
vc4_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };
        bool either_nearest =
                (cso->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);
        struct vc4_sampler_state *so = CALLOC_STRUCT(vc4_sampler_state);

        if (!so)
                return NULL;

        memcpy(so, cso, sizeof(*cso));

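        /* These filter/wrap bits of P1 are combined with the sampler view's
         * size and type bits when the texture uniforms are emitted.
         */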
        so->texture_p1 =
                (VC4_SET_FIELD(magfilter_map[cso->mag_img_filter],
                               VC4_TEX_P1_MAGFILT) |
                 VC4_SET_FIELD(minfilter_map[cso->min_mip_filter * 2 +
                                             cso->min_img_filter],
                               VC4_TEX_P1_MINFILT) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_s, either_nearest),
                               VC4_TEX_P1_WRAP_S) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_t, either_nearest),
                               VC4_TEX_P1_WRAP_T));

        return so;
}

static void
vc4_sampler_states_bind(struct pipe_context *pctx,
                        enum pipe_shader_type shader, unsigned start,
                        unsigned nr, void **hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);

        assert(start == 0);
        unsigned i;
        unsigned new_nr = 0;

        for (i = 0; i < nr; i++) {
                if (hwcso[i])
                        new_nr = i + 1;
                stage_tex->samplers[i] = hwcso[i];
        }

        for (; i < stage_tex->num_samplers; i++) {
                stage_tex->samplers[i] = NULL;
        }

        stage_tex->num_samplers = new_nr;
}

static struct pipe_sampler_view *
vc4_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
        struct vc4_sampler_view *so = CALLOC_STRUCT(vc4_sampler_view);
        struct vc4_resource *rsc = vc4_resource(prsc);

        if (!so)
                return NULL;

        so->base = *cso;

        pipe_reference(NULL, &prsc->reference);

        /* There is no hardware level clamping, and the start address of a
         * texture may be misaligned, so in that case we have to copy to a
         * temporary.
         *
         * Also, Raspberry Pi doesn't support sampling from raster textures,
         * so we also have to copy to a temporary then.
         */
        if ((cso->u.tex.first_level &&
             (cso->u.tex.first_level != cso->u.tex.last_level)) ||
            rsc->vc4_format == VC4_TEXTURE_TYPE_RGBA32R) {
                struct vc4_resource *shadow_parent = vc4_resource(prsc);
                struct pipe_resource tmpl = shadow_parent->base.b;
                struct vc4_resource *clone;

                tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
                tmpl.width0 = u_minify(tmpl.width0, cso->u.tex.first_level);
                tmpl.height0 = u_minify(tmpl.height0, cso->u.tex.first_level);
                tmpl.last_level = cso->u.tex.last_level - cso->u.tex.first_level;

                prsc = vc4_resource_create(pctx->screen, &tmpl);
                if (!prsc) {
                        free(so);
                        return NULL;
                }
                rsc = vc4_resource(prsc);
                clone = vc4_resource(prsc);
                clone->shadow_parent = &shadow_parent->base.b;
                /* Flag it as needing update of the contents from the parent. */
                clone->writes = shadow_parent->writes - 1;

                assert(clone->vc4_format != VC4_TEXTURE_TYPE_RGBA32R);
        } else if (cso->u.tex.first_level) {
                so->force_first_level = true;
        }
        so->base.texture = prsc;
        so->base.reference.count = 1;
        so->base.context = pctx;

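        /* P0 holds the base address (in 4096-byte units), the low bits of the
         * texture type, the mip level count, and the cube-map mode. P1 holds
         * the top type bit and the size, where a dimension of 2048 is encoded
         * as 0.
         */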
        so->texture_p0 =
                (VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE) |
                 VC4_SET_FIELD(so->force_first_level ?
                               cso->u.tex.last_level :
                               cso->u.tex.last_level -
                               cso->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(cso->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE));
        so->texture_p1 =
                (VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
                 VC4_SET_FIELD(prsc->height0 & 2047, VC4_TEX_P1_HEIGHT) |
                 VC4_SET_FIELD(prsc->width0 & 2047, VC4_TEX_P1_WIDTH));

        return &so->base;
}

static void
vc4_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *view)
{
        pipe_resource_reference(&view->texture, NULL);
        free(view);
}

static void
vc4_set_sampler_views(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);
        unsigned i;
        unsigned new_nr = 0;

        assert(start == 0);

        for (i = 0; i < nr; i++) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
        }

        for (; i < stage_tex->num_textures; i++) {
                pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
        }

        stage_tex->num_textures = new_nr;
}

void
vc4_state_init(struct pipe_context *pctx)
{
        pctx->set_blend_color = vc4_set_blend_color;
        pctx->set_stencil_ref = vc4_set_stencil_ref;
        pctx->set_clip_state = vc4_set_clip_state;
        pctx->set_sample_mask = vc4_set_sample_mask;
        pctx->set_constant_buffer = vc4_set_constant_buffer;
        pctx->set_framebuffer_state = vc4_set_framebuffer_state;
        pctx->set_polygon_stipple = vc4_set_polygon_stipple;
        pctx->set_scissor_states = vc4_set_scissor_states;
        pctx->set_viewport_states = vc4_set_viewport_states;

        pctx->set_vertex_buffers = vc4_set_vertex_buffers;
        pctx->set_index_buffer = vc4_set_index_buffer;

        pctx->create_blend_state = vc4_create_blend_state;
        pctx->bind_blend_state = vc4_blend_state_bind;
        pctx->delete_blend_state = vc4_generic_cso_state_delete;

        pctx->create_rasterizer_state = vc4_create_rasterizer_state;
        pctx->bind_rasterizer_state = vc4_rasterizer_state_bind;
        pctx->delete_rasterizer_state = vc4_generic_cso_state_delete;

        pctx->create_depth_stencil_alpha_state = vc4_create_depth_stencil_alpha_state;
        pctx->bind_depth_stencil_alpha_state = vc4_zsa_state_bind;
        pctx->delete_depth_stencil_alpha_state = vc4_generic_cso_state_delete;

        pctx->create_vertex_elements_state = vc4_vertex_state_create;
        pctx->delete_vertex_elements_state = vc4_generic_cso_state_delete;
        pctx->bind_vertex_elements_state = vc4_vertex_state_bind;

        pctx->create_sampler_state = vc4_create_sampler_state;
        pctx->delete_sampler_state = vc4_generic_cso_state_delete;
        pctx->bind_sampler_states = vc4_sampler_states_bind;

        pctx->create_sampler_view = vc4_create_sampler_view;
        pctx->sampler_view_destroy = vc4_sampler_view_destroy;
        pctx->set_sampler_views = vc4_set_sampler_views;
}