broadcom/vc4: Allow binding non-zero constant buffers.
[mesa.git] / src / gallium / drivers / vc4 / vc4_state.c
/*
 * Copyright © 2014 Broadcom
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "vc4_context.h"

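/* Generic helpers for CSOs that need no hardware-specific packing: the
 * gallium state object is simply copied to the heap, and freed again on
 * delete.
 */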
static void *
vc4_generic_cso_state_create(const void *src, uint32_t size)
{
        void *dst = calloc(1, size);
        if (!dst)
                return NULL;
        memcpy(dst, src, size);
        return dst;
}

static void
vc4_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
vc4_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend_color.f = *blend_color;
        for (int i = 0; i < 4; i++)
                vc4->blend_color.ub[i] = float_to_ubyte(blend_color->color[i]);
        vc4->dirty |= VC4_DIRTY_BLEND_COLOR;
}

static void
vc4_set_stencil_ref(struct pipe_context *pctx,
                    const struct pipe_stencil_ref *stencil_ref)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stencil_ref = *stencil_ref;
        vc4->dirty |= VC4_DIRTY_STENCIL_REF;
}

static void
vc4_set_clip_state(struct pipe_context *pctx,
                   const struct pipe_clip_state *clip)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->clip = *clip;
        vc4->dirty |= VC4_DIRTY_CLIP;
}

static void
vc4_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->sample_mask = sample_mask & ((1 << VC4_MAX_SAMPLES) - 1);
        vc4->dirty |= VC4_DIRTY_SAMPLE_MASK;
}

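/* Converts a float to the hardware's 1-sign/8-exponent/7-mantissa 16-bit
 * depth offset format, which is just the top half of an IEEE 754 32-bit
 * float.
 */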
static uint16_t
float_to_187_half(float f)
{
        return fui(f) >> 16;
}

static void *
vc4_create_rasterizer_state(struct pipe_context *pctx,
                            const struct pipe_rasterizer_state *cso)
{
        struct vc4_rasterizer_state *so;
        struct V3D21_DEPTH_OFFSET depth_offset = { V3D21_DEPTH_OFFSET_header };
        struct V3D21_POINT_SIZE point_size = { V3D21_POINT_SIZE_header };
        struct V3D21_LINE_WIDTH line_width = { V3D21_LINE_WIDTH_header };

        so = CALLOC_STRUCT(vc4_rasterizer_state);
        if (!so)
                return NULL;

        so->base = *cso;

        if (!(cso->cull_face & PIPE_FACE_FRONT))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_FRONT;
        if (!(cso->cull_face & PIPE_FACE_BACK))
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_PRIM_BACK;

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size.point_size = MAX2(cso->point_size, .125f);

        line_width.line_width = cso->line_width;

        if (cso->front_ccw)
                so->config_bits[0] |= VC4_CONFIG_BITS_CW_PRIMITIVES;

        if (cso->offset_tri) {
                so->config_bits[0] |= VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET;

                depth_offset.depth_offset_units =
                        float_to_187_half(cso->offset_units);
                depth_offset.depth_offset_factor =
                        float_to_187_half(cso->offset_scale);
        }

        if (cso->multisample)
                so->config_bits[0] |= VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X;

        V3D21_DEPTH_OFFSET_pack(NULL, so->packed.depth_offset, &depth_offset);
        V3D21_POINT_SIZE_pack(NULL, so->packed.point_size, &point_size);
        V3D21_LINE_WIDTH_pack(NULL, so->packed.line_width, &line_width);

        if (cso->tile_raster_order_fixed) {
                so->tile_raster_order_flags |= VC4_SUBMIT_CL_FIXED_RCL_ORDER;
                if (cso->tile_raster_order_increasing_x) {
                        so->tile_raster_order_flags |=
                                VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X;
                }
                if (cso->tile_raster_order_increasing_y) {
                        so->tile_raster_order_flags |=
                                VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y;
                }
        }

        return so;
}

/* Blend state is baked into shaders. */
static void *
vc4_create_blend_state(struct pipe_context *pctx,
                       const struct pipe_blend_state *cso)
{
        return vc4_generic_cso_state_create(cso, sizeof(*cso));
}

/**
 * The TLB_STENCIL_SETUP data has a little bitfield for common writemask
 * values, so you don't have to do a separate writemask setup.
 */
static uint8_t
tlb_stencil_setup_writemask(uint8_t mask)
{
        switch (mask) {
        case 0x1: return 0;
        case 0x3: return 1;
        case 0xf: return 2;
        case 0xff: return 3;
        default: return 0xff;
        }
}

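/* Packs a gallium per-face stencil state into the 32-bit TLB stencil setup
 * word: stencil ops, compare func and value mask, plus the writemask when it
 * matches one of the common encodings above.  The reference value field is
 * left zero here and filled in at uniform upload time.
 */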
static uint32_t
tlb_stencil_setup_bits(const struct pipe_stencil_state *state,
                       uint8_t writemask_bits)
{
        static const uint8_t op_map[] = {
                [PIPE_STENCIL_OP_ZERO] = 0,
                [PIPE_STENCIL_OP_KEEP] = 1,
                [PIPE_STENCIL_OP_REPLACE] = 2,
                [PIPE_STENCIL_OP_INCR] = 3,
                [PIPE_STENCIL_OP_DECR] = 4,
                [PIPE_STENCIL_OP_INVERT] = 5,
                [PIPE_STENCIL_OP_INCR_WRAP] = 6,
                [PIPE_STENCIL_OP_DECR_WRAP] = 7,
        };
        uint32_t bits = 0;

        if (writemask_bits != 0xff)
                bits |= writemask_bits << 28;
        bits |= op_map[state->zfail_op] << 25;
        bits |= op_map[state->zpass_op] << 22;
        bits |= op_map[state->fail_op] << 19;
        bits |= state->func << 16;
        /* Ref is filled in at uniform upload time */
        bits |= state->valuemask << 0;

        return bits;
}

static void *
vc4_create_depth_stencil_alpha_state(struct pipe_context *pctx,
                                     const struct pipe_depth_stencil_alpha_state *cso)
{
        struct vc4_depth_stencil_alpha_state *so;

        so = CALLOC_STRUCT(vc4_depth_stencil_alpha_state);
        if (!so)
                return NULL;

        so->base = *cso;

        /* We always keep the early Z state correct, since a later state using
         * early Z may want it.
         */
        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z_UPDATE;

        if (cso->depth.enabled) {
                if (cso->depth.writemask) {
                        so->config_bits[1] |= VC4_CONFIG_BITS_Z_UPDATE;
                }
                so->config_bits[1] |= (cso->depth.func <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);

                /* We only handle early Z in the < direction because otherwise
                 * we'd have to runtime guess which direction to set in the
                 * render config.
                 */
                if ((cso->depth.func == PIPE_FUNC_LESS ||
                     cso->depth.func == PIPE_FUNC_LEQUAL) &&
                    (!cso->stencil[0].enabled ||
                     (cso->stencil[0].zfail_op == PIPE_STENCIL_OP_KEEP &&
                      (!cso->stencil[1].enabled ||
                       cso->stencil[1].zfail_op == PIPE_STENCIL_OP_KEEP)))) {
                        so->config_bits[2] |= VC4_CONFIG_BITS_EARLY_Z;
                }
        } else {
                so->config_bits[1] |= (PIPE_FUNC_ALWAYS <<
                                       VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT);
        }

        if (cso->stencil[0].enabled) {
                const struct pipe_stencil_state *front = &cso->stencil[0];
                const struct pipe_stencil_state *back = &cso->stencil[1];

                uint8_t front_writemask_bits =
                        tlb_stencil_setup_writemask(front->writemask);
                uint8_t back_writemask = front->writemask;
                uint8_t back_writemask_bits = front_writemask_bits;

                so->stencil_uniforms[0] =
                        tlb_stencil_setup_bits(front, front_writemask_bits);
                if (back->enabled) {
                        back_writemask = back->writemask;
                        back_writemask_bits =
                                tlb_stencil_setup_writemask(back->writemask);

                        so->stencil_uniforms[0] |= (1 << 30);
                        so->stencil_uniforms[1] =
                                tlb_stencil_setup_bits(back, back_writemask_bits);
                        so->stencil_uniforms[1] |= (2 << 30);
                } else {
                        so->stencil_uniforms[0] |= (3 << 30);
                }

                if (front_writemask_bits == 0xff ||
                    back_writemask_bits == 0xff) {
                        so->stencil_uniforms[2] = (front->writemask |
                                                   (back_writemask << 8));
                }
        }

        return so;
}

static void
vc4_set_polygon_stipple(struct pipe_context *pctx,
                        const struct pipe_poly_stipple *stipple)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stipple = *stipple;
        vc4->dirty |= VC4_DIRTY_STIPPLE;
}

static void
vc4_set_scissor_states(struct pipe_context *pctx,
                       unsigned start_slot,
                       unsigned num_scissors,
                       const struct pipe_scissor_state *scissor)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4->scissor = *scissor;
        vc4->dirty |= VC4_DIRTY_SCISSOR;
}

static void
vc4_set_viewport_states(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_viewports,
                        const struct pipe_viewport_state *viewport)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->viewport = *viewport;
        vc4->dirty |= VC4_DIRTY_VIEWPORT;
}

static void
vc4_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;

        util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
                                     start_slot, count);
        so->count = util_last_bit(so->enabled_mask);

        vc4->dirty |= VC4_DIRTY_VTXBUF;
}

static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend = hwcso;
        vc4->dirty |= VC4_DIRTY_BLEND;
}

static void
vc4_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_rasterizer_state *rast = hwcso;

        if (vc4->rasterizer && rast &&
            vc4->rasterizer->base.flatshade != rast->base.flatshade) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        vc4->rasterizer = hwcso;
        vc4->dirty |= VC4_DIRTY_RASTERIZER;
}

static void
vc4_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->zsa = hwcso;
        vc4->dirty |= VC4_DIRTY_ZSA;
}

static void *
vc4_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
        struct vc4_vertex_stateobj *so = CALLOC_STRUCT(vc4_vertex_stateobj);

        if (!so)
                return NULL;

        memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
        so->num_elements = num_elements;

        return so;
}

static void
vc4_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->vtx = hwcso;
        vc4->dirty |= VC4_DIRTY_VTXSTATE;
}

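/* Binds a constant buffer to one of a shader stage's UBO slots.  A change in
 * UBO 1's size is flagged separately with VC4_DIRTY_UBO_1_SIZE so that state
 * depending on it can be updated.
 */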
static void
vc4_set_constant_buffer(struct pipe_context *pctx,
                        enum pipe_shader_type shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_constbuf_stateobj *so = &vc4->constbuf[shader];

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!cb)) {
                so->enabled_mask &= ~(1 << index);
                so->dirty_mask &= ~(1 << index);
                return;
        }

        if (index == 1 && so->cb[index].buffer_size != cb->buffer_size)
                vc4->dirty |= VC4_DIRTY_UBO_1_SIZE;

        pipe_resource_reference(&so->cb[index].buffer, cb->buffer);
        so->cb[index].buffer_offset = cb->buffer_offset;
        so->cb[index].buffer_size = cb->buffer_size;
        so->cb[index].user_buffer = cb->user_buffer;

        so->enabled_mask |= 1 << index;
        so->dirty_mask |= 1 << index;
        vc4->dirty |= VC4_DIRTY_CONSTBUF;
}

static void
vc4_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_framebuffer_state *cso = &vc4->framebuffer;
        unsigned i;

        vc4->job = NULL;

        for (i = 0; i < framebuffer->nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
        for (; i < vc4->framebuffer.nr_cbufs; i++)
                pipe_surface_reference(&cso->cbufs[i], NULL);

        cso->nr_cbufs = framebuffer->nr_cbufs;

        pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

        cso->width = framebuffer->width;
        cso->height = framebuffer->height;

        /* Nonzero texture mipmap levels are laid out as if they were in
         * power-of-two-sized spaces. The renderbuffer config infers its
         * stride from the width parameter, so we need to configure our
         * framebuffer. Note that if the z/color buffers were mismatched
         * sizes, we wouldn't be able to do this.
         */
        if (cso->cbufs[0] && cso->cbufs[0]->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->cbufs[0]->texture);
                cso->width =
                        (rsc->slices[cso->cbufs[0]->u.tex.level].stride /
                         rsc->cpp);
        } else if (cso->zsbuf && cso->zsbuf->u.tex.level) {
                struct vc4_resource *rsc =
                        vc4_resource(cso->zsbuf->texture);
                cso->width =
                        (rsc->slices[cso->zsbuf->u.tex.level].stride /
                         rsc->cpp);
        }

        vc4->dirty |= VC4_DIRTY_FRAMEBUFFER;
}

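/* Returns the texture state for the given shader stage and marks it dirty.
 * Only fragment and vertex shaders exist on vc4.
 */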
static struct vc4_texture_stateobj *
vc4_get_stage_tex(struct vc4_context *vc4, enum pipe_shader_type shader)
{
        switch (shader) {
        case PIPE_SHADER_FRAGMENT:
                vc4->dirty |= VC4_DIRTY_FRAGTEX;
                return &vc4->fragtex;
                break;
        case PIPE_SHADER_VERTEX:
                vc4->dirty |= VC4_DIRTY_VERTTEX;
                return &vc4->verttex;
                break;
        default:
                fprintf(stderr, "Unknown shader target %d\n", shader);
                abort();
        }
}

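/* Translates a gallium wrap mode to the hardware encoding.  Legacy GL CLAMP
 * has no direct equivalent, so it is approximated with CLAMP_TO_EDGE for
 * nearest filtering and CLAMP_TO_BORDER for linear.
 */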
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

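/* Sampler state is packed up front into the sampler's contribution to the
 * texture P1 config word (filters and wrap modes); the sampler view supplies
 * the rest of P1.
 */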
static void *
vc4_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };
        bool either_nearest =
                (cso->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);
        struct vc4_sampler_state *so = CALLOC_STRUCT(vc4_sampler_state);

        if (!so)
                return NULL;

        memcpy(so, cso, sizeof(*cso));

        so->texture_p1 =
                (VC4_SET_FIELD(magfilter_map[cso->mag_img_filter],
                               VC4_TEX_P1_MAGFILT) |
                 VC4_SET_FIELD(minfilter_map[cso->min_mip_filter * 2 +
                                             cso->min_img_filter],
                               VC4_TEX_P1_MINFILT) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_s, either_nearest),
                               VC4_TEX_P1_WRAP_S) |
                 VC4_SET_FIELD(translate_wrap(cso->wrap_t, either_nearest),
                               VC4_TEX_P1_WRAP_T));

        return so;
}

static void
vc4_sampler_states_bind(struct pipe_context *pctx,
                        enum pipe_shader_type shader, unsigned start,
                        unsigned nr, void **hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);

        assert(start == 0);
        unsigned i;
        unsigned new_nr = 0;

        for (i = 0; i < nr; i++) {
                if (hwcso[i])
                        new_nr = i + 1;
                stage_tex->samplers[i] = hwcso[i];
        }

        for (; i < stage_tex->num_samplers; i++) {
                stage_tex->samplers[i] = NULL;
        }

        stage_tex->num_samplers = new_nr;
}

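/* Creates a sampler view and packs the texture P0/P1 config words for it.
 * Textures the hardware can't sample directly (raster-format, or with a
 * nonzero base level and multiple levels) get a tiled shadow copy instead,
 * as described below.
 */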
static struct pipe_sampler_view *
vc4_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
        struct vc4_sampler_view *so = CALLOC_STRUCT(vc4_sampler_view);
        struct vc4_resource *rsc = vc4_resource(prsc);

        if (!so)
                return NULL;

        so->base = *cso;

        so->base.texture = NULL;
        pipe_resource_reference(&so->base.texture, prsc);
        so->base.reference.count = 1;
        so->base.context = pctx;

        /* There is no hardware level clamping, and the start address of a
         * texture may be misaligned, so in that case we have to copy to a
         * temporary.
         *
         * Also, Raspberry Pi doesn't support sampling from raster textures,
         * so we also have to copy to a temporary then.
         */
        if ((cso->u.tex.first_level &&
             (cso->u.tex.first_level != cso->u.tex.last_level)) ||
            rsc->vc4_format == VC4_TEXTURE_TYPE_RGBA32R ||
            rsc->vc4_format == ~0) {
                struct vc4_resource *shadow_parent = rsc;
                struct pipe_resource tmpl = {
                        .target = prsc->target,
                        .format = prsc->format,
                        .width0 = u_minify(prsc->width0,
                                           cso->u.tex.first_level),
                        .height0 = u_minify(prsc->height0,
                                            cso->u.tex.first_level),
                        .bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET,
                        .last_level = cso->u.tex.last_level - cso->u.tex.first_level,
                        .nr_samples = prsc->nr_samples,
                };

                /* Create the shadow texture. The rest of the texture
                 * parameter setup will use the shadow.
                 */
                prsc = vc4_resource_create(pctx->screen, &tmpl);
                if (!prsc) {
                        free(so);
                        return NULL;
                }
                rsc = vc4_resource(prsc);
                vc4_bo_label(vc4_screen(pctx->screen), rsc->bo,
                             "tiling shadow %dx%d",
                             tmpl.width0, tmpl.height0);

                /* Flag it as needing update of the contents from the parent. */
                rsc->writes = shadow_parent->writes - 1;
                assert(rsc->vc4_format != VC4_TEXTURE_TYPE_RGBA32R);

                so->texture = prsc;
        } else {
                pipe_resource_reference(&so->texture, prsc);

                if (cso->u.tex.first_level) {
                        so->force_first_level = true;
                }
        }

        so->texture_p0 =
                (VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE) |
                 VC4_SET_FIELD(so->force_first_level ?
                               cso->u.tex.last_level :
                               cso->u.tex.last_level -
                               cso->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(cso->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE));
        so->texture_p1 =
                (VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
                 VC4_SET_FIELD(prsc->height0 & 2047, VC4_TEX_P1_HEIGHT) |
                 VC4_SET_FIELD(prsc->width0 & 2047, VC4_TEX_P1_WIDTH));

        if (prsc->format == PIPE_FORMAT_ETC1_RGB8)
                so->texture_p1 |= VC4_TEX_P1_ETCFLIP_MASK;

        return &so->base;
}

static void
vc4_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *pview)
{
        struct vc4_sampler_view *view = vc4_sampler_view(pview);
        pipe_resource_reference(&pview->texture, NULL);
        pipe_resource_reference(&view->texture, NULL);
        free(view);
}

static void
vc4_set_sampler_views(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);
        unsigned i;
        unsigned new_nr = 0;

        assert(start == 0);

        for (i = 0; i < nr; i++) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
        }

        for (; i < stage_tex->num_textures; i++) {
                pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
        }

        stage_tex->num_textures = new_nr;
}

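/* Hooks all of the above state-setting functions into the pipe_context at
 * context creation time.
 */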
void
vc4_state_init(struct pipe_context *pctx)
{
        pctx->set_blend_color = vc4_set_blend_color;
        pctx->set_stencil_ref = vc4_set_stencil_ref;
        pctx->set_clip_state = vc4_set_clip_state;
        pctx->set_sample_mask = vc4_set_sample_mask;
        pctx->set_constant_buffer = vc4_set_constant_buffer;
        pctx->set_framebuffer_state = vc4_set_framebuffer_state;
        pctx->set_polygon_stipple = vc4_set_polygon_stipple;
        pctx->set_scissor_states = vc4_set_scissor_states;
        pctx->set_viewport_states = vc4_set_viewport_states;

        pctx->set_vertex_buffers = vc4_set_vertex_buffers;

        pctx->create_blend_state = vc4_create_blend_state;
        pctx->bind_blend_state = vc4_blend_state_bind;
        pctx->delete_blend_state = vc4_generic_cso_state_delete;

        pctx->create_rasterizer_state = vc4_create_rasterizer_state;
        pctx->bind_rasterizer_state = vc4_rasterizer_state_bind;
        pctx->delete_rasterizer_state = vc4_generic_cso_state_delete;

        pctx->create_depth_stencil_alpha_state = vc4_create_depth_stencil_alpha_state;
        pctx->bind_depth_stencil_alpha_state = vc4_zsa_state_bind;
        pctx->delete_depth_stencil_alpha_state = vc4_generic_cso_state_delete;

        pctx->create_vertex_elements_state = vc4_vertex_state_create;
        pctx->delete_vertex_elements_state = vc4_generic_cso_state_delete;
        pctx->bind_vertex_elements_state = vc4_vertex_state_bind;

        pctx->create_sampler_state = vc4_create_sampler_state;
        pctx->delete_sampler_state = vc4_generic_cso_state_delete;
        pctx->bind_sampler_states = vc4_sampler_states_bind;

        pctx->create_sampler_view = vc4_create_sampler_view;
        pctx->sampler_view_destroy = vc4_sampler_view_destroy;
        pctx->set_sampler_views = vc4_set_sampler_views;
}