2 * Copyright 2010 Christoph Bumiller
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "pipe/p_defines.h"
24 #include "util/u_framebuffer.h"
25 #include "util/u_helpers.h"
26 #include "util/u_inlines.h"
27 #include "util/u_transfer.h"
28 #include "util/format_srgb.h"
30 #include "tgsi/tgsi_parse.h"
31 #include "compiler/nir/nir.h"
33 #include "nv50/nv50_stateobj.h"
34 #include "nv50/nv50_context.h"
35 #include "nv50/nv50_query_hw.h"
37 #include "nv50/nv50_3d.xml.h"
38 #include "nv50/g80_texture.xml.h"
40 #include "nouveau_gldefs.h"
43 * ! pipe_sampler_state.normalized_coords is ignored - rectangle textures will
44 * use non-normalized coordinates, everything else won't
45 * (The relevant bit is in the TIC entry and not the TSC entry.)
47 * ! pipe_sampler_state.seamless_cube_map is ignored - seamless filtering is
48 * always activated on NVA0 +
49 * (Give me the global bit, otherwise it's not worth the CPU work.)
51 * ! pipe_sampler_state.border_color is not swizzled according to the texture
52 * swizzle in pipe_sampler_view
53 * (This will be ugly with indirect independent texture/sampler access,
54 * we'd have to emulate the logic in the shader. GL doesn't have that,
55 * D3D doesn't have swizzle, if we knew what we were implementing we'd be
58 * ! pipe_rasterizer_state.line_last_pixel is ignored - it is never drawn
60 * ! pipe_rasterizer_state.flatshade_first also applies to QUADS
61 * (There's a GL query for that, forcing an exception is just ridiculous.)
63 * ! pipe_rasterizer_state.sprite_coord_enable is masked with 0xff on NVC0
64 * (The hardware only has 8 slots meant for TexCoord and we have to assign
65 * in advance to maintain elegant separate shader objects.)
68 static inline uint32_t
69 nv50_colormask(unsigned mask
)
73 if (mask
& PIPE_MASK_R
)
75 if (mask
& PIPE_MASK_G
)
77 if (mask
& PIPE_MASK_B
)
79 if (mask
& PIPE_MASK_A
)
85 #define NV50_BLEND_FACTOR_CASE(a, b) \
86 case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b
88 static inline uint32_t
89 nv50_blend_fac(unsigned factor
)
92 NV50_BLEND_FACTOR_CASE(ONE
, ONE
);
93 NV50_BLEND_FACTOR_CASE(SRC_COLOR
, SRC_COLOR
);
94 NV50_BLEND_FACTOR_CASE(SRC_ALPHA
, SRC_ALPHA
);
95 NV50_BLEND_FACTOR_CASE(DST_ALPHA
, DST_ALPHA
);
96 NV50_BLEND_FACTOR_CASE(DST_COLOR
, DST_COLOR
);
97 NV50_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE
, SRC_ALPHA_SATURATE
);
98 NV50_BLEND_FACTOR_CASE(CONST_COLOR
, CONSTANT_COLOR
);
99 NV50_BLEND_FACTOR_CASE(CONST_ALPHA
, CONSTANT_ALPHA
);
100 NV50_BLEND_FACTOR_CASE(SRC1_COLOR
, SRC1_COLOR
);
101 NV50_BLEND_FACTOR_CASE(SRC1_ALPHA
, SRC1_ALPHA
);
102 NV50_BLEND_FACTOR_CASE(ZERO
, ZERO
);
103 NV50_BLEND_FACTOR_CASE(INV_SRC_COLOR
, ONE_MINUS_SRC_COLOR
);
104 NV50_BLEND_FACTOR_CASE(INV_SRC_ALPHA
, ONE_MINUS_SRC_ALPHA
);
105 NV50_BLEND_FACTOR_CASE(INV_DST_ALPHA
, ONE_MINUS_DST_ALPHA
);
106 NV50_BLEND_FACTOR_CASE(INV_DST_COLOR
, ONE_MINUS_DST_COLOR
);
107 NV50_BLEND_FACTOR_CASE(INV_CONST_COLOR
, ONE_MINUS_CONSTANT_COLOR
);
108 NV50_BLEND_FACTOR_CASE(INV_CONST_ALPHA
, ONE_MINUS_CONSTANT_ALPHA
);
109 NV50_BLEND_FACTOR_CASE(INV_SRC1_COLOR
, ONE_MINUS_SRC1_COLOR
);
110 NV50_BLEND_FACTOR_CASE(INV_SRC1_ALPHA
, ONE_MINUS_SRC1_ALPHA
);
112 return NV50_BLEND_FACTOR_ZERO
;
117 nv50_blend_state_create(struct pipe_context
*pipe
,
118 const struct pipe_blend_state
*cso
)
120 struct nv50_blend_stateobj
*so
= CALLOC_STRUCT(nv50_blend_stateobj
);
122 bool emit_common_func
= cso
->rt
[0].blend_enable
;
125 if (nv50_context(pipe
)->screen
->tesla
->oclass
>= NVA3_3D_CLASS
) {
126 SB_BEGIN_3D(so
, BLEND_INDEPENDENT
, 1);
127 SB_DATA (so
, cso
->independent_blend_enable
);
132 SB_BEGIN_3D(so
, COLOR_MASK_COMMON
, 1);
133 SB_DATA (so
, !cso
->independent_blend_enable
);
135 SB_BEGIN_3D(so
, BLEND_ENABLE_COMMON
, 1);
136 SB_DATA (so
, !cso
->independent_blend_enable
);
138 if (cso
->independent_blend_enable
) {
139 SB_BEGIN_3D(so
, BLEND_ENABLE(0), 8);
140 for (i
= 0; i
< 8; ++i
) {
141 SB_DATA(so
, cso
->rt
[i
].blend_enable
);
142 if (cso
->rt
[i
].blend_enable
)
143 emit_common_func
= true;
146 if (nv50_context(pipe
)->screen
->tesla
->oclass
>= NVA3_3D_CLASS
) {
147 emit_common_func
= false;
149 for (i
= 0; i
< 8; ++i
) {
150 if (!cso
->rt
[i
].blend_enable
)
152 SB_BEGIN_3D_(so
, NVA3_3D_IBLEND_EQUATION_RGB(i
), 6);
153 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].rgb_func
));
154 SB_DATA (so
, nv50_blend_fac(cso
->rt
[i
].rgb_src_factor
));
155 SB_DATA (so
, nv50_blend_fac(cso
->rt
[i
].rgb_dst_factor
));
156 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].alpha_func
));
157 SB_DATA (so
, nv50_blend_fac(cso
->rt
[i
].alpha_src_factor
));
158 SB_DATA (so
, nv50_blend_fac(cso
->rt
[i
].alpha_dst_factor
));
162 SB_BEGIN_3D(so
, BLEND_ENABLE(0), 1);
163 SB_DATA (so
, cso
->rt
[0].blend_enable
);
166 if (emit_common_func
) {
167 SB_BEGIN_3D(so
, BLEND_EQUATION_RGB
, 5);
168 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[0].rgb_func
));
169 SB_DATA (so
, nv50_blend_fac(cso
->rt
[0].rgb_src_factor
));
170 SB_DATA (so
, nv50_blend_fac(cso
->rt
[0].rgb_dst_factor
));
171 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[0].alpha_func
));
172 SB_DATA (so
, nv50_blend_fac(cso
->rt
[0].alpha_src_factor
));
173 SB_BEGIN_3D(so
, BLEND_FUNC_DST_ALPHA
, 1);
174 SB_DATA (so
, nv50_blend_fac(cso
->rt
[0].alpha_dst_factor
));
177 if (cso
->logicop_enable
) {
178 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 2);
180 SB_DATA (so
, nvgl_logicop_func(cso
->logicop_func
));
182 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 1);
186 if (cso
->independent_blend_enable
) {
187 SB_BEGIN_3D(so
, COLOR_MASK(0), 8);
188 for (i
= 0; i
< 8; ++i
)
189 SB_DATA(so
, nv50_colormask(cso
->rt
[i
].colormask
));
191 SB_BEGIN_3D(so
, COLOR_MASK(0), 1);
192 SB_DATA (so
, nv50_colormask(cso
->rt
[0].colormask
));
196 if (cso
->alpha_to_coverage
)
197 ms
|= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE
;
198 if (cso
->alpha_to_one
)
199 ms
|= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE
;
201 SB_BEGIN_3D(so
, MULTISAMPLE_CTRL
, 1);
204 assert(so
->size
<= ARRAY_SIZE(so
->state
));
209 nv50_blend_state_bind(struct pipe_context
*pipe
, void *hwcso
)
211 struct nv50_context
*nv50
= nv50_context(pipe
);
214 nv50
->dirty_3d
|= NV50_NEW_3D_BLEND
;
218 nv50_blend_state_delete(struct pipe_context
*pipe
, void *hwcso
)
223 /* NOTE: ignoring line_last_pixel */
225 nv50_rasterizer_state_create(struct pipe_context
*pipe
,
226 const struct pipe_rasterizer_state
*cso
)
228 struct nv50_rasterizer_stateobj
*so
;
231 so
= CALLOC_STRUCT(nv50_rasterizer_stateobj
);
236 #ifndef NV50_SCISSORS_CLIPPING
237 for (int i
= 0; i
< NV50_MAX_VIEWPORTS
; i
++) {
238 SB_BEGIN_3D(so
, SCISSOR_ENABLE(i
), 1);
239 SB_DATA (so
, cso
->scissor
);
243 SB_BEGIN_3D(so
, SHADE_MODEL
, 1);
244 SB_DATA (so
, cso
->flatshade
? NV50_3D_SHADE_MODEL_FLAT
:
245 NV50_3D_SHADE_MODEL_SMOOTH
);
246 SB_BEGIN_3D(so
, PROVOKING_VERTEX_LAST
, 1);
247 SB_DATA (so
, !cso
->flatshade_first
);
248 SB_BEGIN_3D(so
, VERTEX_TWO_SIDE_ENABLE
, 1);
249 SB_DATA (so
, cso
->light_twoside
);
251 SB_BEGIN_3D(so
, FRAG_COLOR_CLAMP_EN
, 1);
252 SB_DATA (so
, cso
->clamp_fragment_color
? 0x11111111 : 0x00000000);
254 SB_BEGIN_3D(so
, MULTISAMPLE_ENABLE
, 1);
255 SB_DATA (so
, cso
->multisample
);
257 SB_BEGIN_3D(so
, LINE_WIDTH
, 1);
258 SB_DATA (so
, fui(cso
->line_width
));
259 SB_BEGIN_3D(so
, LINE_SMOOTH_ENABLE
, 1);
260 SB_DATA (so
, cso
->line_smooth
);
262 SB_BEGIN_3D(so
, LINE_STIPPLE_ENABLE
, 1);
263 if (cso
->line_stipple_enable
) {
265 SB_BEGIN_3D(so
, LINE_STIPPLE
, 1);
266 SB_DATA (so
, (cso
->line_stipple_pattern
<< 8) |
267 cso
->line_stipple_factor
);
272 if (!cso
->point_size_per_vertex
) {
273 SB_BEGIN_3D(so
, POINT_SIZE
, 1);
274 SB_DATA (so
, fui(cso
->point_size
));
276 SB_BEGIN_3D(so
, POINT_SPRITE_ENABLE
, 1);
277 SB_DATA (so
, cso
->point_quad_rasterization
);
278 SB_BEGIN_3D(so
, POINT_SMOOTH_ENABLE
, 1);
279 SB_DATA (so
, cso
->point_smooth
);
281 SB_BEGIN_3D(so
, POLYGON_MODE_FRONT
, 3);
282 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_front
));
283 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_back
));
284 SB_DATA (so
, cso
->poly_smooth
);
286 SB_BEGIN_3D(so
, CULL_FACE_ENABLE
, 3);
287 SB_DATA (so
, cso
->cull_face
!= PIPE_FACE_NONE
);
288 SB_DATA (so
, cso
->front_ccw
? NV50_3D_FRONT_FACE_CCW
:
289 NV50_3D_FRONT_FACE_CW
);
290 switch (cso
->cull_face
) {
291 case PIPE_FACE_FRONT_AND_BACK
:
292 SB_DATA(so
, NV50_3D_CULL_FACE_FRONT_AND_BACK
);
294 case PIPE_FACE_FRONT
:
295 SB_DATA(so
, NV50_3D_CULL_FACE_FRONT
);
299 SB_DATA(so
, NV50_3D_CULL_FACE_BACK
);
303 SB_BEGIN_3D(so
, POLYGON_STIPPLE_ENABLE
, 1);
304 SB_DATA (so
, cso
->poly_stipple_enable
);
305 SB_BEGIN_3D(so
, POLYGON_OFFSET_POINT_ENABLE
, 3);
306 SB_DATA (so
, cso
->offset_point
);
307 SB_DATA (so
, cso
->offset_line
);
308 SB_DATA (so
, cso
->offset_tri
);
310 if (cso
->offset_point
|| cso
->offset_line
|| cso
->offset_tri
) {
311 SB_BEGIN_3D(so
, POLYGON_OFFSET_FACTOR
, 1);
312 SB_DATA (so
, fui(cso
->offset_scale
));
313 SB_BEGIN_3D(so
, POLYGON_OFFSET_UNITS
, 1);
314 SB_DATA (so
, fui(cso
->offset_units
* 2.0f
));
315 SB_BEGIN_3D(so
, POLYGON_OFFSET_CLAMP
, 1);
316 SB_DATA (so
, fui(cso
->offset_clamp
));
319 if (cso
->depth_clip_near
) {
323 NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR
|
324 NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR
|
325 NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1
;
327 #ifndef NV50_SCISSORS_CLIPPING
329 NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK7
|
330 NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1
;
332 SB_BEGIN_3D(so
, VIEW_VOLUME_CLIP_CTRL
, 1);
335 SB_BEGIN_3D(so
, DEPTH_CLIP_NEGATIVE_Z
, 1);
336 SB_DATA (so
, cso
->clip_halfz
);
338 SB_BEGIN_3D(so
, PIXEL_CENTER_INTEGER
, 1);
339 SB_DATA (so
, !cso
->half_pixel_center
);
341 assert(so
->size
<= ARRAY_SIZE(so
->state
));
346 nv50_rasterizer_state_bind(struct pipe_context
*pipe
, void *hwcso
)
348 struct nv50_context
*nv50
= nv50_context(pipe
);
351 nv50
->dirty_3d
|= NV50_NEW_3D_RASTERIZER
;
355 nv50_rasterizer_state_delete(struct pipe_context
*pipe
, void *hwcso
)
361 nv50_zsa_state_create(struct pipe_context
*pipe
,
362 const struct pipe_depth_stencil_alpha_state
*cso
)
364 struct nv50_zsa_stateobj
*so
= CALLOC_STRUCT(nv50_zsa_stateobj
);
368 SB_BEGIN_3D(so
, DEPTH_WRITE_ENABLE
, 1);
369 SB_DATA (so
, cso
->depth
.writemask
);
370 SB_BEGIN_3D(so
, DEPTH_TEST_ENABLE
, 1);
371 if (cso
->depth
.enabled
) {
373 SB_BEGIN_3D(so
, DEPTH_TEST_FUNC
, 1);
374 SB_DATA (so
, nvgl_comparison_op(cso
->depth
.func
));
379 SB_BEGIN_3D(so
, DEPTH_BOUNDS_EN
, 1);
380 if (cso
->depth
.bounds_test
) {
382 SB_BEGIN_3D(so
, DEPTH_BOUNDS(0), 2);
383 SB_DATA (so
, fui(cso
->depth
.bounds_min
));
384 SB_DATA (so
, fui(cso
->depth
.bounds_max
));
389 if (cso
->stencil
[0].enabled
) {
390 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 5);
392 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].fail_op
));
393 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zfail_op
));
394 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zpass_op
));
395 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[0].func
));
396 SB_BEGIN_3D(so
, STENCIL_FRONT_MASK
, 2);
397 SB_DATA (so
, cso
->stencil
[0].writemask
);
398 SB_DATA (so
, cso
->stencil
[0].valuemask
);
400 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 1);
404 if (cso
->stencil
[1].enabled
) {
405 assert(cso
->stencil
[0].enabled
);
406 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 5);
408 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].fail_op
));
409 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zfail_op
));
410 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zpass_op
));
411 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[1].func
));
412 SB_BEGIN_3D(so
, STENCIL_BACK_MASK
, 2);
413 SB_DATA (so
, cso
->stencil
[1].writemask
);
414 SB_DATA (so
, cso
->stencil
[1].valuemask
);
416 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 1);
420 SB_BEGIN_3D(so
, ALPHA_TEST_ENABLE
, 1);
421 if (cso
->alpha
.enabled
) {
423 SB_BEGIN_3D(so
, ALPHA_TEST_REF
, 2);
424 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
425 SB_DATA (so
, nvgl_comparison_op(cso
->alpha
.func
));
430 SB_BEGIN_3D(so
, CB_ADDR
, 1);
431 SB_DATA (so
, NV50_CB_AUX_ALPHATEST_OFFSET
<< (8 - 2) | NV50_CB_AUX
);
432 SB_BEGIN_3D(so
, CB_DATA(0), 1);
433 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
435 assert(so
->size
<= ARRAY_SIZE(so
->state
));
440 nv50_zsa_state_bind(struct pipe_context
*pipe
, void *hwcso
)
442 struct nv50_context
*nv50
= nv50_context(pipe
);
445 nv50
->dirty_3d
|= NV50_NEW_3D_ZSA
;
449 nv50_zsa_state_delete(struct pipe_context
*pipe
, void *hwcso
)
454 /* ====================== SAMPLERS AND TEXTURES ================================
457 static inline unsigned
458 nv50_tsc_wrap_mode(unsigned wrap
)
461 case PIPE_TEX_WRAP_REPEAT
:
462 return G80_TSC_WRAP_WRAP
;
463 case PIPE_TEX_WRAP_MIRROR_REPEAT
:
464 return G80_TSC_WRAP_MIRROR
;
465 case PIPE_TEX_WRAP_CLAMP_TO_EDGE
:
466 return G80_TSC_WRAP_CLAMP_TO_EDGE
;
467 case PIPE_TEX_WRAP_CLAMP_TO_BORDER
:
468 return G80_TSC_WRAP_BORDER
;
469 case PIPE_TEX_WRAP_CLAMP
:
470 return G80_TSC_WRAP_CLAMP_OGL
;
471 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE
:
472 return G80_TSC_WRAP_MIRROR_ONCE_CLAMP_TO_EDGE
;
473 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER
:
474 return G80_TSC_WRAP_MIRROR_ONCE_BORDER
;
475 case PIPE_TEX_WRAP_MIRROR_CLAMP
:
476 return G80_TSC_WRAP_MIRROR_ONCE_CLAMP_OGL
;
478 NOUVEAU_ERR("unknown wrap mode: %d\n", wrap
);
479 return G80_TSC_WRAP_WRAP
;
484 nv50_sampler_state_create(struct pipe_context
*pipe
,
485 const struct pipe_sampler_state
*cso
)
487 struct nv50_tsc_entry
*so
= MALLOC_STRUCT(nv50_tsc_entry
);
492 so
->tsc
[0] = (0x00026000 |
493 (nv50_tsc_wrap_mode(cso
->wrap_s
) << 0) |
494 (nv50_tsc_wrap_mode(cso
->wrap_t
) << 3) |
495 (nv50_tsc_wrap_mode(cso
->wrap_r
) << 6));
497 switch (cso
->mag_img_filter
) {
498 case PIPE_TEX_FILTER_LINEAR
:
499 so
->tsc
[1] = G80_TSC_1_MAG_FILTER_LINEAR
;
501 case PIPE_TEX_FILTER_NEAREST
:
503 so
->tsc
[1] = G80_TSC_1_MAG_FILTER_NEAREST
;
507 switch (cso
->min_img_filter
) {
508 case PIPE_TEX_FILTER_LINEAR
:
509 so
->tsc
[1] |= G80_TSC_1_MIN_FILTER_LINEAR
;
511 case PIPE_TEX_FILTER_NEAREST
:
513 so
->tsc
[1] |= G80_TSC_1_MIN_FILTER_NEAREST
;
517 switch (cso
->min_mip_filter
) {
518 case PIPE_TEX_MIPFILTER_LINEAR
:
519 so
->tsc
[1] |= G80_TSC_1_MIP_FILTER_LINEAR
;
521 case PIPE_TEX_MIPFILTER_NEAREST
:
522 so
->tsc
[1] |= G80_TSC_1_MIP_FILTER_NEAREST
;
524 case PIPE_TEX_MIPFILTER_NONE
:
526 so
->tsc
[1] |= G80_TSC_1_MIP_FILTER_NONE
;
530 if (nouveau_screen(pipe
->screen
)->class_3d
>= NVE4_3D_CLASS
) {
531 if (cso
->seamless_cube_map
)
532 so
->tsc
[1] |= GK104_TSC_1_CUBEMAP_INTERFACE_FILTERING
;
533 if (!cso
->normalized_coords
)
534 so
->tsc
[1] |= GK104_TSC_1_FLOAT_COORD_NORMALIZATION_FORCE_UNNORMALIZED_COORDS
;
536 so
->seamless_cube_map
= cso
->seamless_cube_map
;
539 if (cso
->max_anisotropy
>= 16)
540 so
->tsc
[0] |= (7 << 20);
542 if (cso
->max_anisotropy
>= 12)
543 so
->tsc
[0] |= (6 << 20);
545 so
->tsc
[0] |= (cso
->max_anisotropy
>> 1) << 20;
547 if (cso
->max_anisotropy
>= 4)
548 so
->tsc
[1] |= 6 << G80_TSC_1_TRILIN_OPT__SHIFT
;
550 if (cso
->max_anisotropy
>= 2)
551 so
->tsc
[1] |= 4 << G80_TSC_1_TRILIN_OPT__SHIFT
;
554 if (cso
->compare_mode
== PIPE_TEX_COMPARE_R_TO_TEXTURE
) {
555 /* NOTE: must be deactivated for non-shadow textures */
556 so
->tsc
[0] |= (1 << 9);
557 so
->tsc
[0] |= (nvgl_comparison_op(cso
->compare_func
) & 0x7) << 10;
560 f
[0] = CLAMP(cso
->lod_bias
, -16.0f
, 15.0f
);
561 so
->tsc
[1] |= ((int)(f
[0] * 256.0f
) & 0x1fff) << 12;
563 f
[0] = CLAMP(cso
->min_lod
, 0.0f
, 15.0f
);
564 f
[1] = CLAMP(cso
->max_lod
, 0.0f
, 15.0f
);
566 (((int)(f
[1] * 256.0f
) & 0xfff) << 12) | ((int)(f
[0] * 256.0f
) & 0xfff);
569 util_format_linear_float_to_srgb_8unorm(cso
->border_color
.f
[0]) << 24;
571 util_format_linear_float_to_srgb_8unorm(cso
->border_color
.f
[1]) << 12;
573 util_format_linear_float_to_srgb_8unorm(cso
->border_color
.f
[2]) << 20;
575 so
->tsc
[4] = fui(cso
->border_color
.f
[0]);
576 so
->tsc
[5] = fui(cso
->border_color
.f
[1]);
577 so
->tsc
[6] = fui(cso
->border_color
.f
[2]);
578 so
->tsc
[7] = fui(cso
->border_color
.f
[3]);
584 nv50_sampler_state_delete(struct pipe_context
*pipe
, void *hwcso
)
588 for (s
= 0; s
< 3; ++s
) {
589 assert(nv50_context(pipe
)->num_samplers
[s
] <= PIPE_MAX_SAMPLERS
);
590 for (i
= 0; i
< nv50_context(pipe
)->num_samplers
[s
]; ++i
)
591 if (nv50_context(pipe
)->samplers
[s
][i
] == hwcso
)
592 nv50_context(pipe
)->samplers
[s
][i
] = NULL
;
595 nv50_screen_tsc_free(nv50_context(pipe
)->screen
, nv50_tsc_entry(hwcso
));
601 nv50_stage_sampler_states_bind(struct nv50_context
*nv50
, int s
,
602 unsigned nr
, void **hwcsos
)
604 unsigned highest_found
= 0;
607 assert(nr
<= PIPE_MAX_SAMPLERS
);
608 for (i
= 0; i
< nr
; ++i
) {
609 struct nv50_tsc_entry
*hwcso
= hwcsos
? nv50_tsc_entry(hwcsos
[i
]) : NULL
;
610 struct nv50_tsc_entry
*old
= nv50
->samplers
[s
][i
];
615 nv50
->samplers
[s
][i
] = hwcso
;
617 nv50_screen_tsc_unlock(nv50
->screen
, old
);
619 assert(nv50
->num_samplers
[s
] <= PIPE_MAX_SAMPLERS
);
620 if (nr
>= nv50
->num_samplers
[s
])
621 nv50
->num_samplers
[s
] = highest_found
+ 1;
623 nv50
->dirty_3d
|= NV50_NEW_3D_SAMPLERS
;
/* Bind sampler states for the vertex stage (internal stage index 0). */
static void
nv50_vp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 0, nr, s);
}
/* Bind sampler states for the fragment stage (internal stage index 2). */
static void
nv50_fp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 2, nr, s);
}
/* Bind sampler states for the geometry stage (internal stage index 1). */
static void
nv50_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 1, nr, s);
}
645 nv50_bind_sampler_states(struct pipe_context
*pipe
,
646 enum pipe_shader_type shader
, unsigned start
,
647 unsigned num_samplers
, void **samplers
)
651 case PIPE_SHADER_VERTEX
:
652 nv50_vp_sampler_states_bind(pipe
, num_samplers
, samplers
);
654 case PIPE_SHADER_GEOMETRY
:
655 nv50_gp_sampler_states_bind(pipe
, num_samplers
, samplers
);
657 case PIPE_SHADER_FRAGMENT
:
658 nv50_fp_sampler_states_bind(pipe
, num_samplers
, samplers
);
661 assert(!"unexpected shader type");
668 /* NOTE: only called when not referenced anywhere, won't be bound */
670 nv50_sampler_view_destroy(struct pipe_context
*pipe
,
671 struct pipe_sampler_view
*view
)
673 pipe_resource_reference(&view
->texture
, NULL
);
675 nv50_screen_tic_free(nv50_context(pipe
)->screen
, nv50_tic_entry(view
));
677 FREE(nv50_tic_entry(view
));
681 nv50_stage_set_sampler_views(struct nv50_context
*nv50
, int s
,
683 struct pipe_sampler_view
**views
)
687 assert(nr
<= PIPE_MAX_SAMPLERS
);
688 for (i
= 0; i
< nr
; ++i
) {
689 struct pipe_sampler_view
*view
= views
? views
[i
] : NULL
;
690 struct nv50_tic_entry
*old
= nv50_tic_entry(nv50
->textures
[s
][i
]);
692 nv50_screen_tic_unlock(nv50
->screen
, old
);
694 if (view
&& view
->texture
) {
695 struct pipe_resource
*res
= view
->texture
;
696 if (res
->target
== PIPE_BUFFER
&&
697 (res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
))
698 nv50
->textures_coherent
[s
] |= 1 << i
;
700 nv50
->textures_coherent
[s
] &= ~(1 << i
);
702 nv50
->textures_coherent
[s
] &= ~(1 << i
);
705 pipe_sampler_view_reference(&nv50
->textures
[s
][i
], view
);
708 assert(nv50
->num_textures
[s
] <= PIPE_MAX_SAMPLERS
);
709 for (i
= nr
; i
< nv50
->num_textures
[s
]; ++i
) {
710 struct nv50_tic_entry
*old
= nv50_tic_entry(nv50
->textures
[s
][i
]);
713 nv50_screen_tic_unlock(nv50
->screen
, old
);
715 pipe_sampler_view_reference(&nv50
->textures
[s
][i
], NULL
);
718 nv50
->num_textures
[s
] = nr
;
720 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_3D_TEXTURES
);
722 nv50
->dirty_3d
|= NV50_NEW_3D_TEXTURES
;
726 nv50_set_sampler_views(struct pipe_context
*pipe
, enum pipe_shader_type shader
,
727 unsigned start
, unsigned nr
,
728 struct pipe_sampler_view
**views
)
732 case PIPE_SHADER_VERTEX
:
733 nv50_stage_set_sampler_views(nv50_context(pipe
), 0, nr
, views
);
735 case PIPE_SHADER_GEOMETRY
:
736 nv50_stage_set_sampler_views(nv50_context(pipe
), 1, nr
, views
);
738 case PIPE_SHADER_FRAGMENT
:
739 nv50_stage_set_sampler_views(nv50_context(pipe
), 2, nr
, views
);
748 /* ============================= SHADERS =======================================
752 nv50_sp_state_create(struct pipe_context
*pipe
,
753 const struct pipe_shader_state
*cso
, unsigned type
)
755 struct nv50_program
*prog
;
757 prog
= CALLOC_STRUCT(nv50_program
);
762 prog
->pipe
.type
= cso
->type
;
765 case PIPE_SHADER_IR_TGSI
:
766 prog
->pipe
.tokens
= tgsi_dup_tokens(cso
->tokens
);
768 case PIPE_SHADER_IR_NIR
:
769 prog
->pipe
.ir
.nir
= cso
->ir
.nir
;
772 assert(!"unsupported IR!");
777 if (cso
->stream_output
.num_outputs
)
778 prog
->pipe
.stream_output
= cso
->stream_output
;
780 prog
->translated
= nv50_program_translate(
781 prog
, nv50_context(pipe
)->screen
->base
.device
->chipset
,
782 &nouveau_context(pipe
)->debug
);
788 nv50_sp_state_delete(struct pipe_context
*pipe
, void *hwcso
)
790 struct nv50_program
*prog
= (struct nv50_program
*)hwcso
;
792 nv50_program_destroy(nv50_context(pipe
), prog
);
794 if (prog
->pipe
.type
== PIPE_SHADER_IR_TGSI
)
795 FREE((void *)prog
->pipe
.tokens
);
796 else if (prog
->pipe
.type
== PIPE_SHADER_IR_NIR
)
797 ralloc_free(prog
->pipe
.ir
.nir
);
802 nv50_vp_state_create(struct pipe_context
*pipe
,
803 const struct pipe_shader_state
*cso
)
805 return nv50_sp_state_create(pipe
, cso
, PIPE_SHADER_VERTEX
);
809 nv50_vp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
811 struct nv50_context
*nv50
= nv50_context(pipe
);
813 nv50
->vertprog
= hwcso
;
814 nv50
->dirty_3d
|= NV50_NEW_3D_VERTPROG
;
818 nv50_fp_state_create(struct pipe_context
*pipe
,
819 const struct pipe_shader_state
*cso
)
821 return nv50_sp_state_create(pipe
, cso
, PIPE_SHADER_FRAGMENT
);
825 nv50_fp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
827 struct nv50_context
*nv50
= nv50_context(pipe
);
829 nv50
->fragprog
= hwcso
;
830 nv50
->dirty_3d
|= NV50_NEW_3D_FRAGPROG
;
834 nv50_gp_state_create(struct pipe_context
*pipe
,
835 const struct pipe_shader_state
*cso
)
837 return nv50_sp_state_create(pipe
, cso
, PIPE_SHADER_GEOMETRY
);
841 nv50_gp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
843 struct nv50_context
*nv50
= nv50_context(pipe
);
845 nv50
->gmtyprog
= hwcso
;
846 nv50
->dirty_3d
|= NV50_NEW_3D_GMTYPROG
;
850 nv50_cp_state_create(struct pipe_context
*pipe
,
851 const struct pipe_compute_state
*cso
)
853 struct nv50_program
*prog
;
855 prog
= CALLOC_STRUCT(nv50_program
);
858 prog
->type
= PIPE_SHADER_COMPUTE
;
859 prog
->pipe
.type
= cso
->ir_type
;
861 switch(cso
->ir_type
) {
862 case PIPE_SHADER_IR_TGSI
:
863 prog
->pipe
.tokens
= tgsi_dup_tokens((const struct tgsi_token
*)cso
->prog
);
865 case PIPE_SHADER_IR_NIR
:
866 prog
->pipe
.ir
.nir
= (nir_shader
*)cso
->prog
;
869 assert(!"unsupported IR!");
874 prog
->cp
.smem_size
= cso
->req_local_mem
;
875 prog
->cp
.lmem_size
= cso
->req_private_mem
;
876 prog
->parm_size
= cso
->req_input_mem
;
882 nv50_cp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
884 struct nv50_context
*nv50
= nv50_context(pipe
);
886 nv50
->compprog
= hwcso
;
887 nv50
->dirty_cp
|= NV50_NEW_CP_PROGRAM
;
891 nv50_set_constant_buffer(struct pipe_context
*pipe
,
892 enum pipe_shader_type shader
, uint index
,
893 const struct pipe_constant_buffer
*cb
)
895 struct nv50_context
*nv50
= nv50_context(pipe
);
896 struct pipe_resource
*res
= cb
? cb
->buffer
: NULL
;
897 const unsigned s
= nv50_context_shader_stage(shader
);
898 const unsigned i
= index
;
900 if (shader
== PIPE_SHADER_COMPUTE
)
903 assert(i
< NV50_MAX_PIPE_CONSTBUFS
);
904 if (nv50
->constbuf
[s
][i
].user
)
905 nv50
->constbuf
[s
][i
].u
.buf
= NULL
;
907 if (nv50
->constbuf
[s
][i
].u
.buf
) {
908 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_3D_CB(s
, i
));
909 nv04_resource(nv50
->constbuf
[s
][i
].u
.buf
)->cb_bindings
[s
] &= ~(1 << i
);
911 pipe_resource_reference(&nv50
->constbuf
[s
][i
].u
.buf
, res
);
913 nv50
->constbuf
[s
][i
].user
= (cb
&& cb
->user_buffer
) ? true : false;
914 if (nv50
->constbuf
[s
][i
].user
) {
915 nv50
->constbuf
[s
][i
].u
.data
= cb
->user_buffer
;
916 nv50
->constbuf
[s
][i
].size
= MIN2(cb
->buffer_size
, 0x10000);
917 nv50
->constbuf_valid
[s
] |= 1 << i
;
918 nv50
->constbuf_coherent
[s
] &= ~(1 << i
);
921 nv50
->constbuf
[s
][i
].offset
= cb
->buffer_offset
;
922 nv50
->constbuf
[s
][i
].size
= MIN2(align(cb
->buffer_size
, 0x100), 0x10000);
923 nv50
->constbuf_valid
[s
] |= 1 << i
;
924 if (res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
925 nv50
->constbuf_coherent
[s
] |= 1 << i
;
927 nv50
->constbuf_coherent
[s
] &= ~(1 << i
);
929 nv50
->constbuf_valid
[s
] &= ~(1 << i
);
930 nv50
->constbuf_coherent
[s
] &= ~(1 << i
);
932 nv50
->constbuf_dirty
[s
] |= 1 << i
;
934 nv50
->dirty_3d
|= NV50_NEW_3D_CONSTBUF
;
937 /* =============================================================================
941 nv50_set_blend_color(struct pipe_context
*pipe
,
942 const struct pipe_blend_color
*bcol
)
944 struct nv50_context
*nv50
= nv50_context(pipe
);
946 nv50
->blend_colour
= *bcol
;
947 nv50
->dirty_3d
|= NV50_NEW_3D_BLEND_COLOUR
;
951 nv50_set_stencil_ref(struct pipe_context
*pipe
,
952 const struct pipe_stencil_ref
*sr
)
954 struct nv50_context
*nv50
= nv50_context(pipe
);
956 nv50
->stencil_ref
= *sr
;
957 nv50
->dirty_3d
|= NV50_NEW_3D_STENCIL_REF
;
961 nv50_set_clip_state(struct pipe_context
*pipe
,
962 const struct pipe_clip_state
*clip
)
964 struct nv50_context
*nv50
= nv50_context(pipe
);
966 memcpy(nv50
->clip
.ucp
, clip
->ucp
, sizeof(clip
->ucp
));
968 nv50
->dirty_3d
|= NV50_NEW_3D_CLIP
;
972 nv50_set_sample_mask(struct pipe_context
*pipe
, unsigned sample_mask
)
974 struct nv50_context
*nv50
= nv50_context(pipe
);
976 nv50
->sample_mask
= sample_mask
;
977 nv50
->dirty_3d
|= NV50_NEW_3D_SAMPLE_MASK
;
981 nv50_set_min_samples(struct pipe_context
*pipe
, unsigned min_samples
)
983 struct nv50_context
*nv50
= nv50_context(pipe
);
985 if (nv50
->min_samples
!= min_samples
) {
986 nv50
->min_samples
= min_samples
;
987 nv50
->dirty_3d
|= NV50_NEW_3D_MIN_SAMPLES
;
992 nv50_set_framebuffer_state(struct pipe_context
*pipe
,
993 const struct pipe_framebuffer_state
*fb
)
995 struct nv50_context
*nv50
= nv50_context(pipe
);
997 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_3D_FB
);
999 util_copy_framebuffer_state(&nv50
->framebuffer
, fb
);
1001 nv50
->dirty_3d
|= NV50_NEW_3D_FRAMEBUFFER
| NV50_NEW_3D_TEXTURES
;
1005 nv50_set_polygon_stipple(struct pipe_context
*pipe
,
1006 const struct pipe_poly_stipple
*stipple
)
1008 struct nv50_context
*nv50
= nv50_context(pipe
);
1010 nv50
->stipple
= *stipple
;
1011 nv50
->dirty_3d
|= NV50_NEW_3D_STIPPLE
;
1015 nv50_set_scissor_states(struct pipe_context
*pipe
,
1016 unsigned start_slot
,
1017 unsigned num_scissors
,
1018 const struct pipe_scissor_state
*scissor
)
1020 struct nv50_context
*nv50
= nv50_context(pipe
);
1023 assert(start_slot
+ num_scissors
<= NV50_MAX_VIEWPORTS
);
1024 for (i
= 0; i
< num_scissors
; i
++) {
1025 if (!memcmp(&nv50
->scissors
[start_slot
+ i
], &scissor
[i
], sizeof(*scissor
)))
1027 nv50
->scissors
[start_slot
+ i
] = scissor
[i
];
1028 nv50
->scissors_dirty
|= 1 << (start_slot
+ i
);
1029 nv50
->dirty_3d
|= NV50_NEW_3D_SCISSOR
;
1034 nv50_set_viewport_states(struct pipe_context
*pipe
,
1035 unsigned start_slot
,
1036 unsigned num_viewports
,
1037 const struct pipe_viewport_state
*vpt
)
1039 struct nv50_context
*nv50
= nv50_context(pipe
);
1042 assert(start_slot
+ num_viewports
<= NV50_MAX_VIEWPORTS
);
1043 for (i
= 0; i
< num_viewports
; i
++) {
1044 if (!memcmp(&nv50
->viewports
[start_slot
+ i
], &vpt
[i
], sizeof(*vpt
)))
1046 nv50
->viewports
[start_slot
+ i
] = vpt
[i
];
1047 nv50
->viewports_dirty
|= 1 << (start_slot
+ i
);
1048 nv50
->dirty_3d
|= NV50_NEW_3D_VIEWPORT
;
1053 nv50_set_window_rectangles(struct pipe_context
*pipe
,
1055 unsigned num_rectangles
,
1056 const struct pipe_scissor_state
*rectangles
)
1058 struct nv50_context
*nv50
= nv50_context(pipe
);
1060 nv50
->window_rect
.inclusive
= include
;
1061 nv50
->window_rect
.rects
= MIN2(num_rectangles
, NV50_MAX_WINDOW_RECTANGLES
);
1062 memcpy(nv50
->window_rect
.rect
, rectangles
,
1063 sizeof(struct pipe_scissor_state
) * nv50
->window_rect
.rects
);
1065 nv50
->dirty_3d
|= NV50_NEW_3D_WINDOW_RECTS
;
1069 nv50_set_vertex_buffers(struct pipe_context
*pipe
,
1070 unsigned start_slot
, unsigned count
,
1071 const struct pipe_vertex_buffer
*vb
)
1073 struct nv50_context
*nv50
= nv50_context(pipe
);
1076 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_3D_VERTEX
);
1077 nv50
->dirty_3d
|= NV50_NEW_3D_ARRAYS
;
1079 util_set_vertex_buffers_count(nv50
->vtxbuf
, &nv50
->num_vtxbufs
, vb
,
1083 nv50
->vbo_user
&= ~(((1ull << count
) - 1) << start_slot
);
1084 nv50
->vbo_constant
&= ~(((1ull << count
) - 1) << start_slot
);
1085 nv50
->vtxbufs_coherent
&= ~(((1ull << count
) - 1) << start_slot
);
1089 for (i
= 0; i
< count
; ++i
) {
1090 unsigned dst_index
= start_slot
+ i
;
1092 if (vb
[i
].is_user_buffer
) {
1093 nv50
->vbo_user
|= 1 << dst_index
;
1095 nv50
->vbo_constant
|= 1 << dst_index
;
1097 nv50
->vbo_constant
&= ~(1 << dst_index
);
1098 nv50
->vtxbufs_coherent
&= ~(1 << dst_index
);
1100 nv50
->vbo_user
&= ~(1 << dst_index
);
1101 nv50
->vbo_constant
&= ~(1 << dst_index
);
1103 if (vb
[i
].buffer
.resource
&&
1104 vb
[i
].buffer
.resource
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
1105 nv50
->vtxbufs_coherent
|= (1 << dst_index
);
1107 nv50
->vtxbufs_coherent
&= ~(1 << dst_index
);
1113 nv50_vertex_state_bind(struct pipe_context
*pipe
, void *hwcso
)
1115 struct nv50_context
*nv50
= nv50_context(pipe
);
1117 nv50
->vertex
= hwcso
;
1118 nv50
->dirty_3d
|= NV50_NEW_3D_VERTEX
;
1121 static struct pipe_stream_output_target
*
1122 nv50_so_target_create(struct pipe_context
*pipe
,
1123 struct pipe_resource
*res
,
1124 unsigned offset
, unsigned size
)
1126 struct nv04_resource
*buf
= (struct nv04_resource
*)res
;
1127 struct nv50_so_target
*targ
= MALLOC_STRUCT(nv50_so_target
);
1131 if (nouveau_context(pipe
)->screen
->class_3d
>= NVA0_3D_CLASS
) {
1132 targ
->pq
= pipe
->create_query(pipe
,
1133 NVA0_HW_QUERY_STREAM_OUTPUT_BUFFER_OFFSET
, 0);
1143 targ
->pipe
.buffer_size
= size
;
1144 targ
->pipe
.buffer_offset
= offset
;
1145 targ
->pipe
.context
= pipe
;
1146 targ
->pipe
.buffer
= NULL
;
1147 pipe_resource_reference(&targ
->pipe
.buffer
, res
);
1148 pipe_reference_init(&targ
->pipe
.reference
, 1);
1150 assert(buf
->base
.target
== PIPE_BUFFER
);
1151 util_range_add(&buf
->base
, &buf
->valid_buffer_range
, offset
, offset
+ size
);
1157 nva0_so_target_save_offset(struct pipe_context
*pipe
,
1158 struct pipe_stream_output_target
*ptarg
,
1159 unsigned index
, bool serialize
)
1161 struct nv50_so_target
*targ
= nv50_so_target(ptarg
);
1164 struct nouveau_pushbuf
*push
= nv50_context(pipe
)->base
.pushbuf
;
1165 PUSH_SPACE(push
, 2);
1166 BEGIN_NV04(push
, SUBC_3D(NV50_GRAPH_SERIALIZE
), 1);
1167 PUSH_DATA (push
, 0);
1170 nv50_query(targ
->pq
)->index
= index
;
1171 pipe
->end_query(pipe
, targ
->pq
);
1175 nv50_so_target_destroy(struct pipe_context
*pipe
,
1176 struct pipe_stream_output_target
*ptarg
)
1178 struct nv50_so_target
*targ
= nv50_so_target(ptarg
);
1180 pipe
->destroy_query(pipe
, targ
->pq
);
1181 pipe_resource_reference(&targ
->pipe
.buffer
, NULL
);
1186 nv50_set_stream_output_targets(struct pipe_context
*pipe
,
1187 unsigned num_targets
,
1188 struct pipe_stream_output_target
**targets
,
1189 const unsigned *offsets
)
1191 struct nv50_context
*nv50
= nv50_context(pipe
);
1193 bool serialize
= true;
1194 const bool can_resume
= nv50
->screen
->base
.class_3d
>= NVA0_3D_CLASS
;
1196 assert(num_targets
<= 4);
1198 for (i
= 0; i
< num_targets
; ++i
) {
1199 const bool changed
= nv50
->so_target
[i
] != targets
[i
];
1200 const bool append
= (offsets
[i
] == (unsigned)-1);
1201 if (!changed
&& append
)
1203 nv50
->so_targets_dirty
|= 1 << i
;
1205 if (can_resume
&& changed
&& nv50
->so_target
[i
]) {
1206 nva0_so_target_save_offset(pipe
, nv50
->so_target
[i
], i
, serialize
);
1210 if (targets
[i
] && !append
)
1211 nv50_so_target(targets
[i
])->clean
= true;
1213 pipe_so_target_reference(&nv50
->so_target
[i
], targets
[i
]);
1215 for (; i
< nv50
->num_so_targets
; ++i
) {
1216 if (can_resume
&& nv50
->so_target
[i
]) {
1217 nva0_so_target_save_offset(pipe
, nv50
->so_target
[i
], i
, serialize
);
1220 pipe_so_target_reference(&nv50
->so_target
[i
], NULL
);
1221 nv50
->so_targets_dirty
|= 1 << i
;
1223 nv50
->num_so_targets
= num_targets
;
1225 if (nv50
->so_targets_dirty
) {
1226 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_3D_SO
);
1227 nv50
->dirty_3d
|= NV50_NEW_3D_STRMOUT
;
/* Gallium hook for binding compute resources; intentionally a stub. */
static void
nv50_set_compute_resources(struct pipe_context *pipe,
                           unsigned start, unsigned nr,
                           struct pipe_surface **resources)
{
   /* TODO: bind surfaces */
}
1240 nv50_set_global_handle(uint32_t *phandle
, struct pipe_resource
*res
)
1242 struct nv04_resource
*buf
= nv04_resource(res
);
1244 uint64_t limit
= (buf
->address
+ buf
->base
.width0
) - 1;
1245 if (limit
< (1ULL << 32)) {
1246 *phandle
= (uint32_t)buf
->address
;
1248 NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
1249 "resource not contained within 32-bit address space !\n");
1258 nv50_set_global_bindings(struct pipe_context
*pipe
,
1259 unsigned start
, unsigned nr
,
1260 struct pipe_resource
**resources
,
1263 struct nv50_context
*nv50
= nv50_context(pipe
);
1264 struct pipe_resource
**ptr
;
1266 const unsigned end
= start
+ nr
;
1268 if (nv50
->global_residents
.size
<= (end
* sizeof(struct pipe_resource
*))) {
1269 const unsigned old_size
= nv50
->global_residents
.size
;
1270 util_dynarray_resize(&nv50
->global_residents
, struct pipe_resource
*, end
);
1271 memset((uint8_t *)nv50
->global_residents
.data
+ old_size
, 0,
1272 nv50
->global_residents
.size
- old_size
);
1276 ptr
= util_dynarray_element(
1277 &nv50
->global_residents
, struct pipe_resource
*, start
);
1278 for (i
= 0; i
< nr
; ++i
) {
1279 pipe_resource_reference(&ptr
[i
], resources
[i
]);
1280 nv50_set_global_handle(handles
[i
], resources
[i
]);
1283 ptr
= util_dynarray_element(
1284 &nv50
->global_residents
, struct pipe_resource
*, start
);
1285 for (i
= 0; i
< nr
; ++i
)
1286 pipe_resource_reference(&ptr
[i
], NULL
);
1289 nouveau_bufctx_reset(nv50
->bufctx_cp
, NV50_BIND_CP_GLOBAL
);
1291 nv50
->dirty_cp
|= NV50_NEW_CP_GLOBALS
;
1295 nv50_init_state_functions(struct nv50_context
*nv50
)
1297 struct pipe_context
*pipe
= &nv50
->base
.pipe
;
1299 pipe
->create_blend_state
= nv50_blend_state_create
;
1300 pipe
->bind_blend_state
= nv50_blend_state_bind
;
1301 pipe
->delete_blend_state
= nv50_blend_state_delete
;
1303 pipe
->create_rasterizer_state
= nv50_rasterizer_state_create
;
1304 pipe
->bind_rasterizer_state
= nv50_rasterizer_state_bind
;
1305 pipe
->delete_rasterizer_state
= nv50_rasterizer_state_delete
;
1307 pipe
->create_depth_stencil_alpha_state
= nv50_zsa_state_create
;
1308 pipe
->bind_depth_stencil_alpha_state
= nv50_zsa_state_bind
;
1309 pipe
->delete_depth_stencil_alpha_state
= nv50_zsa_state_delete
;
1311 pipe
->create_sampler_state
= nv50_sampler_state_create
;
1312 pipe
->delete_sampler_state
= nv50_sampler_state_delete
;
1313 pipe
->bind_sampler_states
= nv50_bind_sampler_states
;
1315 pipe
->create_sampler_view
= nv50_create_sampler_view
;
1316 pipe
->sampler_view_destroy
= nv50_sampler_view_destroy
;
1317 pipe
->set_sampler_views
= nv50_set_sampler_views
;
1319 pipe
->create_vs_state
= nv50_vp_state_create
;
1320 pipe
->create_fs_state
= nv50_fp_state_create
;
1321 pipe
->create_gs_state
= nv50_gp_state_create
;
1322 pipe
->create_compute_state
= nv50_cp_state_create
;
1323 pipe
->bind_vs_state
= nv50_vp_state_bind
;
1324 pipe
->bind_fs_state
= nv50_fp_state_bind
;
1325 pipe
->bind_gs_state
= nv50_gp_state_bind
;
1326 pipe
->bind_compute_state
= nv50_cp_state_bind
;
1327 pipe
->delete_vs_state
= nv50_sp_state_delete
;
1328 pipe
->delete_fs_state
= nv50_sp_state_delete
;
1329 pipe
->delete_gs_state
= nv50_sp_state_delete
;
1330 pipe
->delete_compute_state
= nv50_sp_state_delete
;
1332 pipe
->set_blend_color
= nv50_set_blend_color
;
1333 pipe
->set_stencil_ref
= nv50_set_stencil_ref
;
1334 pipe
->set_clip_state
= nv50_set_clip_state
;
1335 pipe
->set_sample_mask
= nv50_set_sample_mask
;
1336 pipe
->set_min_samples
= nv50_set_min_samples
;
1337 pipe
->set_constant_buffer
= nv50_set_constant_buffer
;
1338 pipe
->set_framebuffer_state
= nv50_set_framebuffer_state
;
1339 pipe
->set_polygon_stipple
= nv50_set_polygon_stipple
;
1340 pipe
->set_scissor_states
= nv50_set_scissor_states
;
1341 pipe
->set_viewport_states
= nv50_set_viewport_states
;
1342 pipe
->set_window_rectangles
= nv50_set_window_rectangles
;
1344 pipe
->create_vertex_elements_state
= nv50_vertex_state_create
;
1345 pipe
->delete_vertex_elements_state
= nv50_vertex_state_delete
;
1346 pipe
->bind_vertex_elements_state
= nv50_vertex_state_bind
;
1348 pipe
->set_vertex_buffers
= nv50_set_vertex_buffers
;
1350 pipe
->create_stream_output_target
= nv50_so_target_create
;
1351 pipe
->stream_output_target_destroy
= nv50_so_target_destroy
;
1352 pipe
->set_stream_output_targets
= nv50_set_stream_output_targets
;
1354 pipe
->set_global_binding
= nv50_set_global_bindings
;
1355 pipe
->set_compute_resources
= nv50_set_compute_resources
;
1357 nv50
->sample_mask
= ~0;
1358 nv50
->min_samples
= 1;