2 * Copyright 2010 Christoph Bumiller
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 #include "pipe/p_defines.h"
24 #include "util/u_inlines.h"
25 #include "util/u_transfer.h"
27 #include "tgsi/tgsi_parse.h"
29 #include "nvc0_stateobj.h"
30 #include "nvc0_context.h"
32 #include "nvc0_3d.xml.h"
33 #include "nv50/nv50_texture.xml.h"
35 #include "nouveau/nouveau_gldefs.h"
37 static INLINE
uint32_t
38 nvc0_colormask(unsigned mask
)
42 if (mask
& PIPE_MASK_R
)
44 if (mask
& PIPE_MASK_G
)
46 if (mask
& PIPE_MASK_B
)
48 if (mask
& PIPE_MASK_A
)
54 #define NVC0_BLEND_FACTOR_CASE(a, b) \
55 case PIPE_BLENDFACTOR_##a: return NV50_3D_BLEND_FACTOR_##b
57 static INLINE
uint32_t
58 nvc0_blend_fac(unsigned factor
)
61 NVC0_BLEND_FACTOR_CASE(ONE
, ONE
);
62 NVC0_BLEND_FACTOR_CASE(SRC_COLOR
, SRC_COLOR
);
63 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA
, SRC_ALPHA
);
64 NVC0_BLEND_FACTOR_CASE(DST_ALPHA
, DST_ALPHA
);
65 NVC0_BLEND_FACTOR_CASE(DST_COLOR
, DST_COLOR
);
66 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE
, SRC_ALPHA_SATURATE
);
67 NVC0_BLEND_FACTOR_CASE(CONST_COLOR
, CONSTANT_COLOR
);
68 NVC0_BLEND_FACTOR_CASE(CONST_ALPHA
, CONSTANT_ALPHA
);
69 NVC0_BLEND_FACTOR_CASE(SRC1_COLOR
, SRC1_COLOR
);
70 NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA
, SRC1_ALPHA
);
71 NVC0_BLEND_FACTOR_CASE(ZERO
, ZERO
);
72 NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR
, ONE_MINUS_SRC_COLOR
);
73 NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA
, ONE_MINUS_SRC_ALPHA
);
74 NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA
, ONE_MINUS_DST_ALPHA
);
75 NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR
, ONE_MINUS_DST_COLOR
);
76 NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR
, ONE_MINUS_CONSTANT_COLOR
);
77 NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA
, ONE_MINUS_CONSTANT_ALPHA
);
78 NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR
, ONE_MINUS_SRC1_COLOR
);
79 NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA
, ONE_MINUS_SRC1_ALPHA
);
81 return NV50_3D_BLEND_FACTOR_ZERO
;
86 nvc0_blend_state_create(struct pipe_context
*pipe
,
87 const struct pipe_blend_state
*cso
)
89 struct nvc0_blend_stateobj
*so
= CALLOC_STRUCT(nvc0_blend_stateobj
);
91 int r
; /* reference */
94 boolean indep_masks
= FALSE
;
95 boolean indep_funcs
= FALSE
;
99 /* check which states actually have differing values */
100 if (cso
->independent_blend_enable
) {
101 for (r
= 0; r
< 8 && !cso
->rt
[r
].blend_enable
; ++r
);
103 for (i
= r
+ 1; i
< 8; ++i
) {
104 if (!cso
->rt
[i
].blend_enable
)
107 if (cso
->rt
[i
].rgb_func
!= cso
->rt
[r
].rgb_func
||
108 cso
->rt
[i
].rgb_src_factor
!= cso
->rt
[r
].rgb_src_factor
||
109 cso
->rt
[i
].rgb_dst_factor
!= cso
->rt
[r
].rgb_dst_factor
||
110 cso
->rt
[i
].alpha_func
!= cso
->rt
[r
].alpha_func
||
111 cso
->rt
[i
].alpha_src_factor
!= cso
->rt
[r
].alpha_src_factor
||
112 cso
->rt
[i
].alpha_dst_factor
!= cso
->rt
[r
].alpha_dst_factor
) {
118 blend_en
|= (cso
->rt
[i
].blend_enable
? 1 : 0) << i
;
120 for (i
= 1; i
< 8; ++i
) {
121 if (cso
->rt
[i
].colormask
!= cso
->rt
[0].colormask
) {
128 if (cso
->rt
[0].blend_enable
)
132 if (cso
->logicop_enable
) {
133 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 2);
135 SB_DATA (so
, nvgl_logicop_func(cso
->logicop_func
));
137 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, 0);
139 SB_IMMED_3D(so
, LOGIC_OP_ENABLE
, 0);
141 SB_IMMED_3D(so
, BLEND_INDEPENDENT
, indep_funcs
);
142 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, blend_en
);
144 for (i
= 0; i
< 8; ++i
) {
145 if (cso
->rt
[i
].blend_enable
) {
146 SB_BEGIN_3D(so
, IBLEND_EQUATION_RGB(i
), 6);
147 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].rgb_func
));
148 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_src_factor
));
149 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_dst_factor
));
150 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].alpha_func
));
151 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_src_factor
));
152 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_dst_factor
));
157 SB_BEGIN_3D(so
, BLEND_EQUATION_RGB
, 5);
158 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].rgb_func
));
159 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_src_factor
));
160 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_dst_factor
));
161 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].alpha_func
));
162 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_src_factor
));
163 SB_BEGIN_3D(so
, BLEND_FUNC_DST_ALPHA
, 1);
164 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_dst_factor
));
167 SB_IMMED_3D(so
, COLOR_MASK_COMMON
, !indep_masks
);
169 SB_BEGIN_3D(so
, COLOR_MASK(0), 8);
170 for (i
= 0; i
< 8; ++i
)
171 SB_DATA(so
, nvc0_colormask(cso
->rt
[i
].colormask
));
173 SB_BEGIN_3D(so
, COLOR_MASK(0), 1);
174 SB_DATA (so
, nvc0_colormask(cso
->rt
[0].colormask
));
179 if (cso
->alpha_to_coverage
)
180 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE
;
181 if (cso
->alpha_to_one
)
182 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE
;
184 SB_BEGIN_3D(so
, MULTISAMPLE_CTRL
, 1);
187 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
192 nvc0_blend_state_bind(struct pipe_context
*pipe
, void *hwcso
)
194 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
197 nvc0
->dirty
|= NVC0_NEW_BLEND
;
/* Destroy a blend state object; the driver keeps no other references to it. */
static void
nvc0_blend_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
206 /* NOTE: ignoring line_last_pixel, using FALSE (set on screen init) */
208 nvc0_rasterizer_state_create(struct pipe_context
*pipe
,
209 const struct pipe_rasterizer_state
*cso
)
211 struct nvc0_rasterizer_stateobj
*so
;
214 so
= CALLOC_STRUCT(nvc0_rasterizer_stateobj
);
219 /* Scissor enables are handled in scissor state, we will not want to
220 * always emit 16 commands, one for each scissor rectangle, here.
223 SB_BEGIN_3D(so
, SHADE_MODEL
, 1);
224 SB_DATA (so
, cso
->flatshade
? NVC0_3D_SHADE_MODEL_FLAT
:
225 NVC0_3D_SHADE_MODEL_SMOOTH
);
226 SB_IMMED_3D(so
, PROVOKING_VERTEX_LAST
, !cso
->flatshade_first
);
227 SB_IMMED_3D(so
, VERTEX_TWO_SIDE_ENABLE
, cso
->light_twoside
);
229 SB_IMMED_3D(so
, VERT_COLOR_CLAMP_EN
, cso
->clamp_vertex_color
);
230 SB_BEGIN_3D(so
, FRAG_COLOR_CLAMP_EN
, 1);
231 SB_DATA (so
, cso
->clamp_fragment_color
? 0x11111111 : 0x00000000);
233 SB_IMMED_3D(so
, MULTISAMPLE_ENABLE
, cso
->multisample
);
235 SB_IMMED_3D(so
, LINE_SMOOTH_ENABLE
, cso
->line_smooth
);
236 if (cso
->line_smooth
)
237 SB_BEGIN_3D(so
, LINE_WIDTH_SMOOTH
, 1);
239 SB_BEGIN_3D(so
, LINE_WIDTH_ALIASED
, 1);
240 SB_DATA (so
, fui(cso
->line_width
));
242 SB_IMMED_3D(so
, LINE_STIPPLE_ENABLE
, cso
->line_stipple_enable
);
243 if (cso
->line_stipple_enable
) {
244 SB_BEGIN_3D(so
, LINE_STIPPLE_PATTERN
, 1);
245 SB_DATA (so
, (cso
->line_stipple_pattern
<< 8) |
246 cso
->line_stipple_factor
);
250 SB_IMMED_3D(so
, VP_POINT_SIZE_EN
, cso
->point_size_per_vertex
);
251 if (!cso
->point_size_per_vertex
) {
252 SB_BEGIN_3D(so
, POINT_SIZE
, 1);
253 SB_DATA (so
, fui(cso
->point_size
));
256 reg
= (cso
->sprite_coord_mode
== PIPE_SPRITE_COORD_UPPER_LEFT
) ?
257 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT
:
258 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT
;
260 SB_BEGIN_3D(so
, POINT_COORD_REPLACE
, 1);
261 SB_DATA (so
, ((cso
->sprite_coord_enable
& 0xff) << 3) | reg
);
262 SB_IMMED_3D(so
, POINT_SPRITE_ENABLE
, cso
->point_quad_rasterization
);
263 SB_IMMED_3D(so
, POINT_SMOOTH_ENABLE
, cso
->point_smooth
);
265 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_FRONT
, 1);
266 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_front
));
267 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_BACK
, 1);
268 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_back
));
269 SB_IMMED_3D(so
, POLYGON_SMOOTH_ENABLE
, cso
->poly_smooth
);
271 SB_BEGIN_3D(so
, CULL_FACE_ENABLE
, 3);
272 SB_DATA (so
, cso
->cull_face
!= PIPE_FACE_NONE
);
273 SB_DATA (so
, cso
->front_ccw
? NVC0_3D_FRONT_FACE_CCW
:
274 NVC0_3D_FRONT_FACE_CW
);
275 switch (cso
->cull_face
) {
276 case PIPE_FACE_FRONT_AND_BACK
:
277 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT_AND_BACK
);
279 case PIPE_FACE_FRONT
:
280 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT
);
284 SB_DATA(so
, NVC0_3D_CULL_FACE_BACK
);
288 SB_IMMED_3D(so
, POLYGON_STIPPLE_ENABLE
, cso
->poly_stipple_enable
);
289 SB_BEGIN_3D(so
, POLYGON_OFFSET_POINT_ENABLE
, 3);
290 SB_DATA (so
, cso
->offset_point
);
291 SB_DATA (so
, cso
->offset_line
);
292 SB_DATA (so
, cso
->offset_tri
);
294 if (cso
->offset_point
|| cso
->offset_line
|| cso
->offset_tri
) {
295 SB_BEGIN_3D(so
, POLYGON_OFFSET_FACTOR
, 1);
296 SB_DATA (so
, fui(cso
->offset_scale
));
297 SB_BEGIN_3D(so
, POLYGON_OFFSET_UNITS
, 1);
298 SB_DATA (so
, fui(cso
->offset_units
* 2.0f
));
299 SB_BEGIN_3D(so
, POLYGON_OFFSET_CLAMP
, 1);
300 SB_DATA (so
, fui(cso
->offset_clamp
));
304 reg
= NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
;
307 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
|
308 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR
|
309 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR
|
310 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2
;
312 SB_BEGIN_3D(so
, VIEW_VOLUME_CLIP_CTRL
, 1);
315 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
320 nvc0_rasterizer_state_bind(struct pipe_context
*pipe
, void *hwcso
)
322 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
325 nvc0
->dirty
|= NVC0_NEW_RASTERIZER
;
/* Destroy a rasterizer state object. */
static void
nvc0_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
335 nvc0_zsa_state_create(struct pipe_context
*pipe
,
336 const struct pipe_depth_stencil_alpha_state
*cso
)
338 struct nvc0_zsa_stateobj
*so
= CALLOC_STRUCT(nvc0_zsa_stateobj
);
342 SB_IMMED_3D(so
, DEPTH_TEST_ENABLE
, cso
->depth
.enabled
);
343 if (cso
->depth
.enabled
) {
344 SB_IMMED_3D(so
, DEPTH_WRITE_ENABLE
, cso
->depth
.writemask
);
345 SB_BEGIN_3D(so
, DEPTH_TEST_FUNC
, 1);
346 SB_DATA (so
, nvgl_comparison_op(cso
->depth
.func
));
349 if (cso
->stencil
[0].enabled
) {
350 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 5);
352 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].fail_op
));
353 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zfail_op
));
354 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zpass_op
));
355 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[0].func
));
356 SB_BEGIN_3D(so
, STENCIL_FRONT_FUNC_MASK
, 2);
357 SB_DATA (so
, cso
->stencil
[0].valuemask
);
358 SB_DATA (so
, cso
->stencil
[0].writemask
);
360 SB_IMMED_3D(so
, STENCIL_ENABLE
, 0);
363 if (cso
->stencil
[1].enabled
) {
364 assert(cso
->stencil
[0].enabled
);
365 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 5);
367 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].fail_op
));
368 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zfail_op
));
369 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zpass_op
));
370 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[1].func
));
371 SB_BEGIN_3D(so
, STENCIL_BACK_MASK
, 2);
372 SB_DATA (so
, cso
->stencil
[1].writemask
);
373 SB_DATA (so
, cso
->stencil
[1].valuemask
);
375 if (cso
->stencil
[0].enabled
) {
376 SB_IMMED_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 0);
379 SB_IMMED_3D(so
, ALPHA_TEST_ENABLE
, cso
->alpha
.enabled
);
380 if (cso
->alpha
.enabled
) {
381 SB_BEGIN_3D(so
, ALPHA_TEST_REF
, 2);
382 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
383 SB_DATA (so
, nvgl_comparison_op(cso
->alpha
.func
));
386 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
391 nvc0_zsa_state_bind(struct pipe_context
*pipe
, void *hwcso
)
393 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
396 nvc0
->dirty
|= NVC0_NEW_ZSA
;
/* Destroy a depth/stencil/alpha state object. */
static void
nvc0_zsa_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
405 /* ====================== SAMPLERS AND TEXTURES ================================
408 #define NV50_TSC_WRAP_CASE(n) \
409 case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
411 static INLINE
unsigned
412 nv50_tsc_wrap_mode(unsigned wrap
)
415 NV50_TSC_WRAP_CASE(REPEAT
);
416 NV50_TSC_WRAP_CASE(MIRROR_REPEAT
);
417 NV50_TSC_WRAP_CASE(CLAMP_TO_EDGE
);
418 NV50_TSC_WRAP_CASE(CLAMP_TO_BORDER
);
419 NV50_TSC_WRAP_CASE(CLAMP
);
420 NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_EDGE
);
421 NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_BORDER
);
422 NV50_TSC_WRAP_CASE(MIRROR_CLAMP
);
424 NOUVEAU_ERR("unknown wrap mode: %d\n", wrap
);
425 return NV50_TSC_WRAP_REPEAT
;
430 nvc0_sampler_state_delete(struct pipe_context
*pipe
, void *hwcso
)
434 for (s
= 0; s
< 5; ++s
)
435 for (i
= 0; i
< nvc0_context(pipe
)->num_samplers
[s
]; ++i
)
436 if (nvc0_context(pipe
)->samplers
[s
][i
] == hwcso
)
437 nvc0_context(pipe
)->samplers
[s
][i
] = NULL
;
439 nvc0_screen_tsc_free(nvc0_context(pipe
)->screen
, nv50_tsc_entry(hwcso
));
445 nvc0_stage_sampler_states_bind(struct nvc0_context
*nvc0
, int s
,
446 unsigned nr
, void **hwcso
)
450 for (i
= 0; i
< nr
; ++i
) {
451 struct nv50_tsc_entry
*old
= nvc0
->samplers
[s
][i
];
455 nvc0
->samplers_dirty
[s
] |= 1 << i
;
457 nvc0
->samplers
[s
][i
] = nv50_tsc_entry(hwcso
[i
]);
459 nvc0_screen_tsc_unlock(nvc0
->screen
, old
);
461 for (; i
< nvc0
->num_samplers
[s
]; ++i
) {
462 if (nvc0
->samplers
[s
][i
]) {
463 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
464 nvc0
->samplers
[s
][i
] = NULL
;
468 nvc0
->num_samplers
[s
] = nr
;
470 nvc0
->dirty
|= NVC0_NEW_SAMPLERS
;
/* Vertex-stage wrapper (stage index 0). */
static void
nvc0_vp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 0, nr, s);
}
/* Fragment-stage wrapper (stage index 4). */
static void
nvc0_fp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 4, nr, s);
}
/* Geometry-stage wrapper (stage index 3). */
static void
nvc0_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 3, nr, s);
}
491 /* NOTE: only called when not referenced anywhere, won't be bound */
493 nvc0_sampler_view_destroy(struct pipe_context
*pipe
,
494 struct pipe_sampler_view
*view
)
496 pipe_resource_reference(&view
->texture
, NULL
);
498 nvc0_screen_tic_free(nvc0_context(pipe
)->screen
, nv50_tic_entry(view
));
500 FREE(nv50_tic_entry(view
));
504 nvc0_stage_set_sampler_views(struct nvc0_context
*nvc0
, int s
,
506 struct pipe_sampler_view
**views
)
510 for (i
= 0; i
< nr
; ++i
) {
511 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
513 if (views
[i
] == nvc0
->textures
[s
][i
])
515 nvc0
->textures_dirty
[s
] |= 1 << i
;
518 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
519 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
522 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], views
[i
]);
525 for (i
= nr
; i
< nvc0
->num_textures
[s
]; ++i
) {
526 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
528 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
529 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
530 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
534 nvc0
->num_textures
[s
] = nr
;
536 nvc0
->dirty
|= NVC0_NEW_TEXTURES
;
/* Vertex-stage wrapper (stage index 0). */
static void
nvc0_vp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 0, nr, views);
}
/* Fragment-stage wrapper (stage index 4). */
static void
nvc0_fp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 4, nr, views);
}
/* Geometry-stage wrapper (stage index 3). */
static void
nvc0_gp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 3, nr, views);
}
563 /* ============================= SHADERS =======================================
567 nvc0_sp_state_create(struct pipe_context
*pipe
,
568 const struct pipe_shader_state
*cso
, unsigned type
)
570 struct nvc0_program
*prog
;
572 prog
= CALLOC_STRUCT(nvc0_program
);
579 prog
->pipe
.tokens
= tgsi_dup_tokens(cso
->tokens
);
581 if (cso
->stream_output
.num_outputs
)
582 prog
->pipe
.stream_output
= cso
->stream_output
;
588 nvc0_sp_state_delete(struct pipe_context
*pipe
, void *hwcso
)
590 struct nvc0_program
*prog
= (struct nvc0_program
*)hwcso
;
592 nvc0_program_destroy(nvc0_context(pipe
), prog
);
594 FREE((void *)prog
->pipe
.tokens
);
599 nvc0_vp_state_create(struct pipe_context
*pipe
,
600 const struct pipe_shader_state
*cso
)
602 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_VERTEX
);
606 nvc0_vp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
608 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
610 nvc0
->vertprog
= hwcso
;
611 nvc0
->dirty
|= NVC0_NEW_VERTPROG
;
615 nvc0_fp_state_create(struct pipe_context
*pipe
,
616 const struct pipe_shader_state
*cso
)
618 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_FRAGMENT
);
622 nvc0_fp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
624 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
626 nvc0
->fragprog
= hwcso
;
627 nvc0
->dirty
|= NVC0_NEW_FRAGPROG
;
631 nvc0_gp_state_create(struct pipe_context
*pipe
,
632 const struct pipe_shader_state
*cso
)
634 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_GEOMETRY
);
638 nvc0_gp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
640 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
642 nvc0
->gmtyprog
= hwcso
;
643 nvc0
->dirty
|= NVC0_NEW_GMTYPROG
;
647 nvc0_set_constant_buffer(struct pipe_context
*pipe
, uint shader
, uint index
,
648 struct pipe_constant_buffer
*cb
)
650 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
651 struct pipe_resource
*res
= cb
? cb
->buffer
: NULL
;
652 const unsigned s
= nvc0_shader_stage(shader
);
653 const unsigned i
= index
;
655 if (shader
== PIPE_SHADER_COMPUTE
)
658 if (nvc0
->constbuf
[s
][i
].user
)
659 nvc0
->constbuf
[s
][i
].u
.buf
= NULL
;
661 if (nvc0
->constbuf
[s
][i
].u
.buf
)
662 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_CB(s
, i
));
664 pipe_resource_reference(&nvc0
->constbuf
[s
][i
].u
.buf
, res
);
666 nvc0
->constbuf
[s
][i
].user
= (cb
&& cb
->user_buffer
) ? TRUE
: FALSE
;
667 if (nvc0
->constbuf
[s
][i
].user
) {
668 nvc0
->constbuf
[s
][i
].u
.data
= cb
->user_buffer
;
669 nvc0
->constbuf
[s
][i
].size
= cb
->buffer_size
;
672 nvc0
->constbuf
[s
][i
].offset
= cb
->buffer_offset
;
673 nvc0
->constbuf
[s
][i
].size
= align(cb
->buffer_size
, 0x100);
676 nvc0
->constbuf_dirty
[s
] |= 1 << i
;
678 nvc0
->dirty
|= NVC0_NEW_CONSTBUF
;
681 /* =============================================================================
685 nvc0_set_blend_color(struct pipe_context
*pipe
,
686 const struct pipe_blend_color
*bcol
)
688 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
690 nvc0
->blend_colour
= *bcol
;
691 nvc0
->dirty
|= NVC0_NEW_BLEND_COLOUR
;
695 nvc0_set_stencil_ref(struct pipe_context
*pipe
,
696 const struct pipe_stencil_ref
*sr
)
698 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
700 nvc0
->stencil_ref
= *sr
;
701 nvc0
->dirty
|= NVC0_NEW_STENCIL_REF
;
705 nvc0_set_clip_state(struct pipe_context
*pipe
,
706 const struct pipe_clip_state
*clip
)
708 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
710 memcpy(nvc0
->clip
.ucp
, clip
->ucp
, sizeof(clip
->ucp
));
712 nvc0
->dirty
|= NVC0_NEW_CLIP
;
716 nvc0_set_sample_mask(struct pipe_context
*pipe
, unsigned sample_mask
)
718 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
720 nvc0
->sample_mask
= sample_mask
;
721 nvc0
->dirty
|= NVC0_NEW_SAMPLE_MASK
;
726 nvc0_set_framebuffer_state(struct pipe_context
*pipe
,
727 const struct pipe_framebuffer_state
*fb
)
729 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
732 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_FB
);
734 for (i
= 0; i
< fb
->nr_cbufs
; ++i
)
735 pipe_surface_reference(&nvc0
->framebuffer
.cbufs
[i
], fb
->cbufs
[i
]);
736 for (; i
< nvc0
->framebuffer
.nr_cbufs
; ++i
)
737 pipe_surface_reference(&nvc0
->framebuffer
.cbufs
[i
], NULL
);
739 nvc0
->framebuffer
.nr_cbufs
= fb
->nr_cbufs
;
741 nvc0
->framebuffer
.width
= fb
->width
;
742 nvc0
->framebuffer
.height
= fb
->height
;
744 pipe_surface_reference(&nvc0
->framebuffer
.zsbuf
, fb
->zsbuf
);
746 nvc0
->dirty
|= NVC0_NEW_FRAMEBUFFER
;
750 nvc0_set_polygon_stipple(struct pipe_context
*pipe
,
751 const struct pipe_poly_stipple
*stipple
)
753 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
755 nvc0
->stipple
= *stipple
;
756 nvc0
->dirty
|= NVC0_NEW_STIPPLE
;
760 nvc0_set_scissor_state(struct pipe_context
*pipe
,
761 const struct pipe_scissor_state
*scissor
)
763 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
765 nvc0
->scissor
= *scissor
;
766 nvc0
->dirty
|= NVC0_NEW_SCISSOR
;
770 nvc0_set_viewport_state(struct pipe_context
*pipe
,
771 const struct pipe_viewport_state
*vpt
)
773 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
775 nvc0
->viewport
= *vpt
;
776 nvc0
->dirty
|= NVC0_NEW_VIEWPORT
;
780 nvc0_set_vertex_buffers(struct pipe_context
*pipe
,
782 const struct pipe_vertex_buffer
*vb
)
784 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
785 uint32_t constant_vbos
= 0;
790 if (count
!= nvc0
->num_vtxbufs
) {
791 for (i
= 0; i
< count
; ++i
) {
792 pipe_resource_reference(&nvc0
->vtxbuf
[i
].buffer
, vb
[i
].buffer
);
793 if (vb
[i
].user_buffer
) {
794 nvc0
->vbo_user
|= 1 << i
;
795 nvc0
->vtxbuf
[i
].user_buffer
= vb
[i
].user_buffer
;
797 constant_vbos
|= 1 << i
;
799 nvc0
->vtxbuf
[i
].buffer_offset
= vb
[i
].buffer_offset
;
801 nvc0
->vtxbuf
[i
].stride
= vb
[i
].stride
;
803 for (; i
< nvc0
->num_vtxbufs
; ++i
)
804 pipe_resource_reference(&nvc0
->vtxbuf
[i
].buffer
, NULL
);
806 nvc0
->num_vtxbufs
= count
;
807 nvc0
->dirty
|= NVC0_NEW_ARRAYS
;
809 for (i
= 0; i
< count
; ++i
) {
810 if (vb
[i
].user_buffer
) {
811 nvc0
->vtxbuf
[i
].user_buffer
= vb
[i
].user_buffer
;
812 nvc0
->vbo_user
|= 1 << i
;
814 constant_vbos
|= 1 << i
;
815 assert(!vb
[i
].buffer
);
817 if (nvc0
->vtxbuf
[i
].buffer
== vb
[i
].buffer
&&
818 nvc0
->vtxbuf
[i
].buffer_offset
== vb
[i
].buffer_offset
&&
819 nvc0
->vtxbuf
[i
].stride
== vb
[i
].stride
)
821 pipe_resource_reference(&nvc0
->vtxbuf
[i
].buffer
, vb
[i
].buffer
);
822 nvc0
->vtxbuf
[i
].buffer_offset
= vb
[i
].buffer_offset
;
823 nvc0
->vtxbuf
[i
].stride
= vb
[i
].stride
;
824 nvc0
->dirty
|= NVC0_NEW_ARRAYS
;
827 if (constant_vbos
!= nvc0
->constant_vbos
) {
828 nvc0
->constant_vbos
= constant_vbos
;
829 nvc0
->dirty
|= NVC0_NEW_ARRAYS
;
832 if (nvc0
->dirty
& NVC0_NEW_ARRAYS
)
833 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_VTX
);
837 nvc0_set_index_buffer(struct pipe_context
*pipe
,
838 const struct pipe_index_buffer
*ib
)
840 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
842 if (nvc0
->idxbuf
.buffer
)
843 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_IDX
);
846 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, ib
->buffer
);
847 nvc0
->idxbuf
.index_size
= ib
->index_size
;
849 nvc0
->idxbuf
.offset
= ib
->offset
;
850 nvc0
->dirty
|= NVC0_NEW_IDXBUF
;
852 nvc0
->idxbuf
.user_buffer
= ib
->user_buffer
;
853 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
856 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
857 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, NULL
);
862 nvc0_vertex_state_bind(struct pipe_context
*pipe
, void *hwcso
)
864 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
866 nvc0
->vertex
= hwcso
;
867 nvc0
->dirty
|= NVC0_NEW_VERTEX
;
870 static struct pipe_stream_output_target
*
871 nvc0_so_target_create(struct pipe_context
*pipe
,
872 struct pipe_resource
*res
,
873 unsigned offset
, unsigned size
)
875 struct nvc0_so_target
*targ
= MALLOC_STRUCT(nvc0_so_target
);
879 targ
->pq
= pipe
->create_query(pipe
, NVC0_QUERY_TFB_BUFFER_OFFSET
);
886 targ
->pipe
.buffer_size
= size
;
887 targ
->pipe
.buffer_offset
= offset
;
888 targ
->pipe
.context
= pipe
;
889 targ
->pipe
.buffer
= NULL
;
890 pipe_resource_reference(&targ
->pipe
.buffer
, res
);
891 pipe_reference_init(&targ
->pipe
.reference
, 1);
897 nvc0_so_target_destroy(struct pipe_context
*pipe
,
898 struct pipe_stream_output_target
*ptarg
)
900 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
901 pipe
->destroy_query(pipe
, targ
->pq
);
906 nvc0_set_transform_feedback_targets(struct pipe_context
*pipe
,
907 unsigned num_targets
,
908 struct pipe_stream_output_target
**targets
,
909 unsigned append_mask
)
911 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
913 boolean serialize
= TRUE
;
915 assert(num_targets
<= 4);
917 for (i
= 0; i
< num_targets
; ++i
) {
918 if (nvc0
->tfbbuf
[i
] == targets
[i
] && (append_mask
& (1 << i
)))
920 nvc0
->tfbbuf_dirty
|= 1 << i
;
922 if (nvc0
->tfbbuf
[i
] && nvc0
->tfbbuf
[i
] != targets
[i
])
923 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
925 if (targets
[i
] && !(append_mask
& (1 << i
)))
926 nvc0_so_target(targets
[i
])->clean
= TRUE
;
928 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], targets
[i
]);
930 for (; i
< nvc0
->num_tfbbufs
; ++i
) {
931 nvc0
->tfbbuf_dirty
|= 1 << i
;
932 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
933 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], NULL
);
935 nvc0
->num_tfbbufs
= num_targets
;
937 if (nvc0
->tfbbuf_dirty
)
938 nvc0
->dirty
|= NVC0_NEW_TFB_TARGETS
;
942 nvc0_init_state_functions(struct nvc0_context
*nvc0
)
944 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
946 pipe
->create_blend_state
= nvc0_blend_state_create
;
947 pipe
->bind_blend_state
= nvc0_blend_state_bind
;
948 pipe
->delete_blend_state
= nvc0_blend_state_delete
;
950 pipe
->create_rasterizer_state
= nvc0_rasterizer_state_create
;
951 pipe
->bind_rasterizer_state
= nvc0_rasterizer_state_bind
;
952 pipe
->delete_rasterizer_state
= nvc0_rasterizer_state_delete
;
954 pipe
->create_depth_stencil_alpha_state
= nvc0_zsa_state_create
;
955 pipe
->bind_depth_stencil_alpha_state
= nvc0_zsa_state_bind
;
956 pipe
->delete_depth_stencil_alpha_state
= nvc0_zsa_state_delete
;
958 pipe
->create_sampler_state
= nv50_sampler_state_create
;
959 pipe
->delete_sampler_state
= nvc0_sampler_state_delete
;
960 pipe
->bind_vertex_sampler_states
= nvc0_vp_sampler_states_bind
;
961 pipe
->bind_fragment_sampler_states
= nvc0_fp_sampler_states_bind
;
962 pipe
->bind_geometry_sampler_states
= nvc0_gp_sampler_states_bind
;
964 pipe
->create_sampler_view
= nvc0_create_sampler_view
;
965 pipe
->sampler_view_destroy
= nvc0_sampler_view_destroy
;
966 pipe
->set_vertex_sampler_views
= nvc0_vp_set_sampler_views
;
967 pipe
->set_fragment_sampler_views
= nvc0_fp_set_sampler_views
;
968 pipe
->set_geometry_sampler_views
= nvc0_gp_set_sampler_views
;
970 pipe
->create_vs_state
= nvc0_vp_state_create
;
971 pipe
->create_fs_state
= nvc0_fp_state_create
;
972 pipe
->create_gs_state
= nvc0_gp_state_create
;
973 pipe
->bind_vs_state
= nvc0_vp_state_bind
;
974 pipe
->bind_fs_state
= nvc0_fp_state_bind
;
975 pipe
->bind_gs_state
= nvc0_gp_state_bind
;
976 pipe
->delete_vs_state
= nvc0_sp_state_delete
;
977 pipe
->delete_fs_state
= nvc0_sp_state_delete
;
978 pipe
->delete_gs_state
= nvc0_sp_state_delete
;
980 pipe
->set_blend_color
= nvc0_set_blend_color
;
981 pipe
->set_stencil_ref
= nvc0_set_stencil_ref
;
982 pipe
->set_clip_state
= nvc0_set_clip_state
;
983 pipe
->set_sample_mask
= nvc0_set_sample_mask
;
984 pipe
->set_constant_buffer
= nvc0_set_constant_buffer
;
985 pipe
->set_framebuffer_state
= nvc0_set_framebuffer_state
;
986 pipe
->set_polygon_stipple
= nvc0_set_polygon_stipple
;
987 pipe
->set_scissor_state
= nvc0_set_scissor_state
;
988 pipe
->set_viewport_state
= nvc0_set_viewport_state
;
990 pipe
->create_vertex_elements_state
= nvc0_vertex_state_create
;
991 pipe
->delete_vertex_elements_state
= nvc0_vertex_state_delete
;
992 pipe
->bind_vertex_elements_state
= nvc0_vertex_state_bind
;
994 pipe
->set_vertex_buffers
= nvc0_set_vertex_buffers
;
995 pipe
->set_index_buffer
= nvc0_set_index_buffer
;
997 pipe
->create_stream_output_target
= nvc0_so_target_create
;
998 pipe
->stream_output_target_destroy
= nvc0_so_target_destroy
;
999 pipe
->set_stream_output_targets
= nvc0_set_transform_feedback_targets
;