src/gallium/drivers/vc4/vc4_state.c
/*
 * Copyright © 2014 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "vc4_context.h"

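/* Generic helper for CSOs that have no derived hardware state: the state
 * object is just a malloc'd copy of the gallium CSO, consumed at draw time.
 */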
static void *
vc4_generic_cso_state_create(const void *src, uint32_t size)
{
        void *dst = calloc(1, size);
        if (!dst)
                return NULL;
        memcpy(dst, src, size);
        return dst;
}

static void
vc4_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
vc4_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend_color.f = *blend_color;
        for (int i = 0; i < 4; i++)
                vc4->blend_color.ub[i] = float_to_ubyte(blend_color->color[i]);
        vc4->dirty |= VC4_DIRTY_BLEND_COLOR;
}

static void
vc4_set_stencil_ref(struct pipe_context *pctx,
                    const struct pipe_stencil_ref *stencil_ref)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stencil_ref = *stencil_ref;
        vc4->dirty |= VC4_DIRTY_STENCIL_REF;
}

static void
vc4_set_clip_state(struct pipe_context *pctx,
                   const struct pipe_clip_state *clip)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->clip = *clip;
        vc4->dirty |= VC4_DIRTY_CLIP;
}

static void
vc4_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->sample_mask = sample_mask & ((1 << VC4_MAX_SAMPLES) - 1);
        vc4->dirty |= VC4_DIRTY_SAMPLE_MASK;
}

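/* Converts a float to the hardware's 1-8-7 format (sign, 8 exponent bits,
 * 7 mantissa bits) used for the depth offset units/factor, by taking the
 * top 16 bits of the IEEE 754 bit pattern.  fui() reinterprets the float's
 * bits as a uint32_t.
 */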
static uint16_t
float_to_187_half(float f)
{
        return fui(f) >> 16;
}

static void *
vc4_create_rasterizer_state(struct pipe_context *pctx,
                            const struct pipe_rasterizer_state *cso)
{
        struct vc4_rasterizer_state *so;

        so = CALLOC_STRUCT(vc4_rasterizer_state);
        if (!so)
                return NULL;

        so->base = *cso;

        if (!(cso->cull_face & PIPE_FACE_FRONT))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_FRONT;
        if (!(cso->cull_face & PIPE_FACE_BACK))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_BACK;

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        so->point_size = MAX2(cso->point_size, .125f);

        if (cso->front_ccw)
                so->config_bits[0] |= VC4_CONFIG_BITS_CW_PRIMITIVES;

        if (cso->offset_tri) {
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET;

                so->offset_units = float_to_187_half(cso->offset_units);
                so->offset_factor = float_to_187_half(cso->offset_scale);
        }

        if (cso->multisample)
                so->config_bits[0] |= VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X;

        return so;
}

/* Blend state is baked into shaders. */
static void *
vc4_create_blend_state(struct pipe_context *pctx,
                       const struct pipe_blend_state *cso)
{
        return vc4_generic_cso_state_create(cso, sizeof(*cso));
}

/**
 * The TLB_STENCIL_SETUP data has a little bitfield for common writemask
 * values, so you don't have to do a separate writemask setup.
 */
static uint8_t
tlb_stencil_setup_writemask(uint8_t mask)
{
        switch (mask) {
        case 0x1: return 0;
        case 0x3: return 1;
        case 0xf: return 2;
        case 0xff: return 3;
        default: return 0xff;
        }
}

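/* Packs one face's stencil state into a TLB_STENCIL_SETUP word: the
 * writemask select in bits 29:28, the z-fail/z-pass/fail ops in bits 27:19,
 * the compare function in bits 18:16, and the value mask in the low byte.
 * The reference value is patched in at uniform upload time.
 */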
static uint32_t
tlb_stencil_setup_bits(const struct pipe_stencil_state *state,
                       uint8_t writemask_bits)
{
        static const uint8_t op_map[] = {
                [PIPE_STENCIL_OP_ZERO] = 0,
                [PIPE_STENCIL_OP_KEEP] = 1,
                [PIPE_STENCIL_OP_REPLACE] = 2,
                [PIPE_STENCIL_OP_INCR] = 3,
                [PIPE_STENCIL_OP_DECR] = 4,
                [PIPE_STENCIL_OP_INVERT] = 5,
                [PIPE_STENCIL_OP_INCR_WRAP] = 6,
                [PIPE_STENCIL_OP_DECR_WRAP] = 7,
        };
        uint32_t bits = 0;

        if (writemask_bits != 0xff)
                bits |= writemask_bits << 28;
        bits |= op_map[state->zfail_op] << 25;
        bits |= op_map[state->zpass_op] << 22;
        bits |= op_map[state->fail_op] << 19;
        bits |= state->func << 16;
        /* Ref is filled in at uniform upload time */
        bits |= state->valuemask << 0;

        return bits;
}

static void *
vc4_create_depth_stencil_alpha_state(struct pipe_context *pctx,
                                     const struct pipe_depth_stencil_alpha_state *cso)
{
        struct vc4_depth_stencil_alpha_state *so;

        so = CALLOC_STRUCT(vc4_depth_stencil_alpha_state);
        if (!so)
                return NULL;

        so->base = *cso;

        /* We always keep the early Z state correct, since a later state using
         * early Z may want it.
         */
        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z_UPDATE;

        if (cso->depth.enabled) {
                if (cso->depth.writemask) {
                        so->config_bits[1] |= VC4_CONFIG_BITS_Z_UPDATE;
                }
                so->config_bits[1] |= (cso->depth.func <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);

                /* We only handle early Z in the < direction because otherwise
                 * we'd have to runtime guess which direction to set in the
                 * render config.
                 */
                if ((cso->depth.func == PIPE_FUNC_LESS ||
                     cso->depth.func == PIPE_FUNC_LEQUAL) &&
                    (!cso->stencil[0].enabled ||
                     (cso->stencil[0].zfail_op == PIPE_STENCIL_OP_KEEP &&
                      (!cso->stencil[1].enabled ||
                       cso->stencil[1].zfail_op == PIPE_STENCIL_OP_KEEP)))) {
                        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z;
                }
        } else {
                so->config_bits[1] |= (PIPE_FUNC_ALWAYS <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);
        }

        if (cso->stencil[0].enabled) {
                const struct pipe_stencil_state *front = &cso->stencil[0];
                const struct pipe_stencil_state *back = &cso->stencil[1];

                uint8_t front_writemask_bits =
                        tlb_stencil_setup_writemask(front->writemask);
                uint8_t back_writemask = front->writemask;
                uint8_t back_writemask_bits = front_writemask_bits;

                so->stencil_uniforms[0] =
                        tlb_stencil_setup_bits(front, front_writemask_bits);
                if (back->enabled) {
                        back_writemask = back->writemask;
                        back_writemask_bits =
                                tlb_stencil_setup_writemask(back->writemask);

                        so->stencil_uniforms[0] |= (1 << 30);
                        so->stencil_uniforms[1] =
                                tlb_stencil_setup_bits(back, back_writemask_bits);
                        so->stencil_uniforms[1] |= (2 << 30);
                } else {
                        so->stencil_uniforms[0] |= (3 << 30);
                }

                if (front_writemask_bits == 0xff ||
                    back_writemask_bits == 0xff) {
                        so->stencil_uniforms[2] = (front->writemask |
                                                   (back_writemask << 8));
                }
        }

        return so;
}

static void
vc4_set_polygon_stipple(struct pipe_context *pctx,
                        const struct pipe_poly_stipple *stipple)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stipple = *stipple;
        vc4->dirty |= VC4_DIRTY_STIPPLE;
}

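/* Only a single viewport/scissor slot is exposed, so start_slot and the
 * count are effectively ignored here and in the viewport hook below.
 */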
static void
vc4_set_scissor_states(struct pipe_context *pctx,
                       unsigned start_slot,
                       unsigned num_scissors,
                       const struct pipe_scissor_state *scissor)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4->scissor = *scissor;
        vc4->dirty |= VC4_DIRTY_SCISSOR;
}

static void
vc4_set_viewport_states(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_viewports,
                        const struct pipe_viewport_state *viewport)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->viewport = *viewport;
        vc4->dirty |= VC4_DIRTY_VIEWPORT;
}

static void
vc4_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;

        util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
                                     start_slot, count);
        so->count = util_last_bit(so->enabled_mask);

        vc4->dirty |= VC4_DIRTY_VTXBUF;
}

static void
vc4_set_index_buffer(struct pipe_context *pctx,
                     const struct pipe_index_buffer *ib)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (ib) {
                pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
                vc4->indexbuf.index_size = ib->index_size;
                vc4->indexbuf.offset = ib->offset;
                vc4->indexbuf.user_buffer = ib->user_buffer;
        } else {
                pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
        }

        vc4->dirty |= VC4_DIRTY_INDEXBUF;
}

static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend = hwcso;
        vc4->dirty |= VC4_DIRTY_BLEND;
}

static void
vc4_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_rasterizer_state *rast = hwcso;

        if (vc4->rasterizer && rast &&
            vc4->rasterizer->base.flatshade != rast->base.flatshade) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        vc4->rasterizer = hwcso;
        vc4->dirty |= VC4_DIRTY_RASTERIZER;
}

static void
vc4_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->zsa = hwcso;
        vc4->dirty |= VC4_DIRTY_ZSA;
}

static void *
vc4_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
        struct vc4_vertex_stateobj *so = CALLOC_STRUCT(vc4_vertex_stateobj);

        if (!so)
                return NULL;

        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
        so->num_elements = num_elements;

        return so;
}

static void
vc4_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->vtx = hwcso;
        vc4->dirty |= VC4_DIRTY_VTXSTATE;
}

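/* Only a single user constant buffer per stage is supported (see the asserts
 * below); the uniform contents are read from the user buffer at draw time.
 */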
static void
vc4_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_constbuf_stateobj *so = &vc4->constbuf[shader];

        assert(index == 0);

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!cb)) {
                so->enabled_mask &= ~(1 << index);
                so->dirty_mask &= ~(1 << index);
                return;
        }

        assert(!cb->buffer);
        so->cb[index].buffer_offset = cb->buffer_offset;
        so->cb[index].buffer_size = cb->buffer_size;
        so->cb[index].user_buffer = cb->user_buffer;

        so->enabled_mask |= 1 << index;
        so->dirty_mask |= 1 << index;
        vc4->dirty |= VC4_DIRTY_CONSTBUF;
}

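/* A rendering job is tied to one set of render targets, so any job
 * accumulated against the old framebuffer is flushed before switching.
 */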
static void
vc4_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_framebuffer_state *cso = &vc4->framebuffer;
        unsigned i;

        vc4_flush(pctx);

        for (i = 0; i < framebuffer->nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
        for (; i < vc4->framebuffer.nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], NULL);

        cso->nr_cbufs = framebuffer->nr_cbufs;

        pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

        cso->width = framebuffer->width;
        cso->height = framebuffer->height;

        /* If we're binding to uninitialized buffers, no need to load their
         * contents before drawing.
         */
        if (cso->cbufs[0]) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                if (!rsc->writes)
                        vc4->cleared |= PIPE_CLEAR_COLOR0;
        }

        if (cso->zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                if (!rsc->writes)
                        vc4->cleared |= PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL;
        }

        /* Nonzero texture mipmap levels are laid out as if they were in
         * power-of-two-sized spaces. The renderbuffer config infers its
         * stride from the width parameter, so we need to configure our
         * framebuffer. Note that if the z/color buffers were mismatched
         * sizes, we wouldn't be able to do this.
         */
        if (cso->cbufs[0] && cso->cbufs[0]->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                cso->width =
                        (rsc->slices[cso->cbufs[0]->u.tex.level].stride /
                         rsc->cpp);
        } else if (cso->zsbuf && cso->zsbuf->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                cso->width =
                        (rsc->slices[cso->zsbuf->u.tex.level].stride /
                         rsc->cpp);
        }

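        /* The tile buffer covers 64x64 pixels, or 32x32 pixels when
         * multisampling, since each pixel then carries four samples.
         */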
        vc4->msaa = false;
        if (cso->cbufs[0])
                vc4->msaa = cso->cbufs[0]->texture->nr_samples > 1;
        else if (cso->zsbuf)
                vc4->msaa = cso->zsbuf->texture->nr_samples > 1;

        if (vc4->msaa) {
                vc4->tile_width = 32;
                vc4->tile_height = 32;
        } else {
                vc4->tile_width = 64;
                vc4->tile_height = 64;
        }
        vc4->draw_tiles_x = DIV_ROUND_UP(cso->width, vc4->tile_width);
        vc4->draw_tiles_y = DIV_ROUND_UP(cso->height, vc4->tile_height);

        vc4->dirty |= VC4_DIRTY_FRAMEBUFFER;
}

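/* Returns the texture/sampler state for the given shader stage, flagging the
 * corresponding dirty bit since the callers are about to modify it.
 */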
static struct vc4_texture_stateobj *
vc4_get_stage_tex(struct vc4_context *vc4, unsigned shader)
{
        switch (shader) {
        case PIPE_SHADER_FRAGMENT:
                vc4->dirty |= VC4_DIRTY_FRAGTEX;
                return &vc4->fragtex;
                break;
        case PIPE_SHADER_VERTEX:
                vc4->dirty |= VC4_DIRTY_VERTTEX;
                return &vc4->verttex;
                break;
        default:
                fprintf(stderr, "Unknown shader target %d\n", shader);
                abort();
        }
}

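/* Translates a gallium wrap mode to the hardware encoding. There is no
 * native GL_CLAMP, so PIPE_TEX_WRAP_CLAMP is approximated with clamp-to-edge
 * for nearest filtering and clamp-to-border for linear.
 */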
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

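/* Bakes the filtering and wrap modes into the texture P1 config word.
 * minfilter_map is indexed by min_mip_filter * 2 + min_img_filter to pick
 * the combined minification/mipmap filter encoding.
 */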
static void *
vc4_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };
        bool either_nearest =
                (cso->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);
        struct vc4_sampler_state *so = CALLOC_STRUCT(vc4_sampler_state);

        if (!so)
                return NULL;

        memcpy(so, cso, sizeof(*cso));

        so->texture_p1 =
                (VC4_SET_FIELD(magfilter_map[cso->mag_img_filter],
                               VC4_TEX_P1_MAGFILT) |
                 VC4_SET_FIELD(minfilter_map[cso->min_mip_filter * 2 +
                                             cso->min_img_filter],
                               VC4_TEX_P1_MINFILT) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_s, either_nearest),
                               VC4_TEX_P1_WRAP_S) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_t, either_nearest),
                               VC4_TEX_P1_WRAP_T));

        return so;
}

static void
vc4_sampler_states_bind(struct pipe_context *pctx,
                        unsigned shader, unsigned start,
                        unsigned nr, void **hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);

        assert(start == 0);
        unsigned i;
        unsigned new_nr = 0;

        for (i = 0; i < nr; i++) {
                if (hwcso[i])
                        new_nr = i + 1;
                stage_tex->samplers[i] = hwcso[i];
        }

        for (; i < stage_tex->num_samplers; i++) {
                stage_tex->samplers[i] = NULL;
        }

        stage_tex->num_samplers = new_nr;
}

static struct pipe_sampler_view *
vc4_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
        /* Zero the allocation so force_first_level is initialized even when
         * the shadow-copy path below doesn't set it.
         */
        struct vc4_sampler_view *so = CALLOC_STRUCT(vc4_sampler_view);
        struct vc4_resource *rsc = vc4_resource(prsc);

        if (!so)
                return NULL;

        so->base = *cso;

        pipe_reference(NULL, &prsc->reference);

        /* There is no hardware level clamping, and the start address of a
         * texture may be misaligned, so in that case we have to copy to a
         * temporary.
         *
         * Also, Raspberry Pi doesn't support sampling from raster textures,
         * so we also have to copy to a temporary then.
         */
        if ((cso->u.tex.first_level &&
             (cso->u.tex.first_level != cso->u.tex.last_level)) ||
            rsc->vc4_format == VC4_TEXTURE_TYPE_RGBA32R) {
                struct vc4_resource *shadow_parent = vc4_resource(prsc);
                struct pipe_resource tmpl = shadow_parent->base.b;
                struct vc4_resource *clone;

                tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
                tmpl.width0 = u_minify(tmpl.width0, cso->u.tex.first_level);
                tmpl.height0 = u_minify(tmpl.height0, cso->u.tex.first_level);
                tmpl.last_level = cso->u.tex.last_level - cso->u.tex.first_level;

                prsc = vc4_resource_create(pctx->screen, &tmpl);
                if (!prsc) {
                        free(so);
                        return NULL;
                }
                rsc = vc4_resource(prsc);
                clone = vc4_resource(prsc);
                clone->shadow_parent = &shadow_parent->base.b;
                /* Flag it as needing update of the contents from the parent. */
                clone->writes = shadow_parent->writes - 1;

                assert(clone->vc4_format != VC4_TEXTURE_TYPE_RGBA32R);
        } else if (cso->u.tex.first_level) {
                so->force_first_level = true;
        }
        so->base.texture = prsc;
        so->base.reference.count = 1;
        so->base.context = pctx;

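        /* P0 and P1 are the two hardware texture config words: P0 packs the
         * base offset in 4096-byte units, the low type bits, the mip level
         * count, and the cube-map flag; P1 packs the high type bit and the
         * width/height (masked to 11 bits).
         */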
        so->texture_p0 =
                (VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE) |
                 VC4_SET_FIELD(so->force_first_level ?
                               cso->u.tex.last_level :
                               cso->u.tex.last_level -
                               cso->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(cso->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE));
        so->texture_p1 =
                (VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
                 VC4_SET_FIELD(prsc->height0 & 2047, VC4_TEX_P1_HEIGHT) |
                 VC4_SET_FIELD(prsc->width0 & 2047, VC4_TEX_P1_WIDTH));

        return &so->base;
}

static void
vc4_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *view)
{
        pipe_resource_reference(&view->texture, NULL);
        free(view);
}

static void
vc4_set_sampler_views(struct pipe_context *pctx, unsigned shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);
        unsigned i;
        unsigned new_nr = 0;

        assert(start == 0);

        for (i = 0; i < nr; i++) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
        }

        for (; i < stage_tex->num_textures; i++) {
                pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
        }

        stage_tex->num_textures = new_nr;
}

void
vc4_state_init(struct pipe_context *pctx)
{
        pctx->set_blend_color = vc4_set_blend_color;
        pctx->set_stencil_ref = vc4_set_stencil_ref;
        pctx->set_clip_state = vc4_set_clip_state;
        pctx->set_sample_mask = vc4_set_sample_mask;
        pctx->set_constant_buffer = vc4_set_constant_buffer;
        pctx->set_framebuffer_state = vc4_set_framebuffer_state;
        pctx->set_polygon_stipple = vc4_set_polygon_stipple;
        pctx->set_scissor_states = vc4_set_scissor_states;
        pctx->set_viewport_states = vc4_set_viewport_states;

        pctx->set_vertex_buffers = vc4_set_vertex_buffers;
        pctx->set_index_buffer = vc4_set_index_buffer;

        pctx->create_blend_state = vc4_create_blend_state;
        pctx->bind_blend_state = vc4_blend_state_bind;
        pctx->delete_blend_state = vc4_generic_cso_state_delete;

        pctx->create_rasterizer_state = vc4_create_rasterizer_state;
        pctx->bind_rasterizer_state = vc4_rasterizer_state_bind;
        pctx->delete_rasterizer_state = vc4_generic_cso_state_delete;

        pctx->create_depth_stencil_alpha_state = vc4_create_depth_stencil_alpha_state;
        pctx->bind_depth_stencil_alpha_state = vc4_zsa_state_bind;
        pctx->delete_depth_stencil_alpha_state = vc4_generic_cso_state_delete;

        pctx->create_vertex_elements_state = vc4_vertex_state_create;
        pctx->delete_vertex_elements_state = vc4_generic_cso_state_delete;
        pctx->bind_vertex_elements_state = vc4_vertex_state_bind;

        pctx->create_sampler_state = vc4_create_sampler_state;
        pctx->delete_sampler_state = vc4_generic_cso_state_delete;
        pctx->bind_sampler_states = vc4_sampler_states_bind;

        pctx->create_sampler_view = vc4_create_sampler_view;
        pctx->sampler_view_destroy = vc4_sampler_view_destroy;
        pctx->set_sampler_views = vc4_set_sampler_views;
}