2 * Copyright 2010 Christoph Bumiller
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "pipe/p_defines.h"
24 #include "util/u_helpers.h"
25 #include "util/u_inlines.h"
26 #include "util/u_transfer.h"
28 #include "tgsi/tgsi_parse.h"
30 #include "nvc0/nvc0_stateobj.h"
31 #include "nvc0/nvc0_context.h"
33 #include "nvc0/nvc0_3d.xml.h"
34 #include "nv50/nv50_texture.xml.h"
36 #include "nouveau_gldefs.h"
38 static INLINE
uint32_t
39 nvc0_colormask(unsigned mask
)
43 if (mask
& PIPE_MASK_R
)
45 if (mask
& PIPE_MASK_G
)
47 if (mask
& PIPE_MASK_B
)
49 if (mask
& PIPE_MASK_A
)
55 #define NVC0_BLEND_FACTOR_CASE(a, b) \
56 case PIPE_BLENDFACTOR_##a: return NV50_3D_BLEND_FACTOR_##b
58 static INLINE
uint32_t
59 nvc0_blend_fac(unsigned factor
)
62 NVC0_BLEND_FACTOR_CASE(ONE
, ONE
);
63 NVC0_BLEND_FACTOR_CASE(SRC_COLOR
, SRC_COLOR
);
64 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA
, SRC_ALPHA
);
65 NVC0_BLEND_FACTOR_CASE(DST_ALPHA
, DST_ALPHA
);
66 NVC0_BLEND_FACTOR_CASE(DST_COLOR
, DST_COLOR
);
67 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE
, SRC_ALPHA_SATURATE
);
68 NVC0_BLEND_FACTOR_CASE(CONST_COLOR
, CONSTANT_COLOR
);
69 NVC0_BLEND_FACTOR_CASE(CONST_ALPHA
, CONSTANT_ALPHA
);
70 NVC0_BLEND_FACTOR_CASE(SRC1_COLOR
, SRC1_COLOR
);
71 NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA
, SRC1_ALPHA
);
72 NVC0_BLEND_FACTOR_CASE(ZERO
, ZERO
);
73 NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR
, ONE_MINUS_SRC_COLOR
);
74 NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA
, ONE_MINUS_SRC_ALPHA
);
75 NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA
, ONE_MINUS_DST_ALPHA
);
76 NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR
, ONE_MINUS_DST_COLOR
);
77 NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR
, ONE_MINUS_CONSTANT_COLOR
);
78 NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA
, ONE_MINUS_CONSTANT_ALPHA
);
79 NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR
, ONE_MINUS_SRC1_COLOR
);
80 NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA
, ONE_MINUS_SRC1_ALPHA
);
82 return NV50_3D_BLEND_FACTOR_ZERO
;
87 nvc0_blend_state_create(struct pipe_context
*pipe
,
88 const struct pipe_blend_state
*cso
)
90 struct nvc0_blend_stateobj
*so
= CALLOC_STRUCT(nvc0_blend_stateobj
);
92 int r
; /* reference */
95 boolean indep_masks
= FALSE
;
96 boolean indep_funcs
= FALSE
;
100 /* check which states actually have differing values */
101 if (cso
->independent_blend_enable
) {
102 for (r
= 0; r
< 8 && !cso
->rt
[r
].blend_enable
; ++r
);
104 for (i
= r
+ 1; i
< 8; ++i
) {
105 if (!cso
->rt
[i
].blend_enable
)
108 if (cso
->rt
[i
].rgb_func
!= cso
->rt
[r
].rgb_func
||
109 cso
->rt
[i
].rgb_src_factor
!= cso
->rt
[r
].rgb_src_factor
||
110 cso
->rt
[i
].rgb_dst_factor
!= cso
->rt
[r
].rgb_dst_factor
||
111 cso
->rt
[i
].alpha_func
!= cso
->rt
[r
].alpha_func
||
112 cso
->rt
[i
].alpha_src_factor
!= cso
->rt
[r
].alpha_src_factor
||
113 cso
->rt
[i
].alpha_dst_factor
!= cso
->rt
[r
].alpha_dst_factor
) {
119 blend_en
|= (cso
->rt
[i
].blend_enable
? 1 : 0) << i
;
121 for (i
= 1; i
< 8; ++i
) {
122 if (cso
->rt
[i
].colormask
!= cso
->rt
[0].colormask
) {
129 if (cso
->rt
[0].blend_enable
)
133 if (cso
->logicop_enable
) {
134 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 2);
136 SB_DATA (so
, nvgl_logicop_func(cso
->logicop_func
));
138 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, 0);
140 SB_IMMED_3D(so
, LOGIC_OP_ENABLE
, 0);
142 SB_IMMED_3D(so
, BLEND_INDEPENDENT
, indep_funcs
);
143 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, blend_en
);
145 for (i
= 0; i
< 8; ++i
) {
146 if (cso
->rt
[i
].blend_enable
) {
147 SB_BEGIN_3D(so
, IBLEND_EQUATION_RGB(i
), 6);
148 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].rgb_func
));
149 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_src_factor
));
150 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_dst_factor
));
151 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].alpha_func
));
152 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_src_factor
));
153 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_dst_factor
));
158 SB_BEGIN_3D(so
, BLEND_EQUATION_RGB
, 5);
159 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].rgb_func
));
160 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_src_factor
));
161 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_dst_factor
));
162 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].alpha_func
));
163 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_src_factor
));
164 SB_BEGIN_3D(so
, BLEND_FUNC_DST_ALPHA
, 1);
165 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_dst_factor
));
168 SB_IMMED_3D(so
, COLOR_MASK_COMMON
, !indep_masks
);
170 SB_BEGIN_3D(so
, COLOR_MASK(0), 8);
171 for (i
= 0; i
< 8; ++i
)
172 SB_DATA(so
, nvc0_colormask(cso
->rt
[i
].colormask
));
174 SB_BEGIN_3D(so
, COLOR_MASK(0), 1);
175 SB_DATA (so
, nvc0_colormask(cso
->rt
[0].colormask
));
180 if (cso
->alpha_to_coverage
)
181 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE
;
182 if (cso
->alpha_to_one
)
183 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE
;
185 SB_BEGIN_3D(so
, MULTISAMPLE_CTRL
, 1);
188 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
193 nvc0_blend_state_bind(struct pipe_context
*pipe
, void *hwcso
)
195 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
198 nvc0
->dirty
|= NVC0_NEW_BLEND
;
/* Destroy a blend state object (it is not bound at this point). */
static void
nvc0_blend_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
207 /* NOTE: ignoring line_last_pixel, using FALSE (set on screen init) */
209 nvc0_rasterizer_state_create(struct pipe_context
*pipe
,
210 const struct pipe_rasterizer_state
*cso
)
212 struct nvc0_rasterizer_stateobj
*so
;
215 so
= CALLOC_STRUCT(nvc0_rasterizer_stateobj
);
220 /* Scissor enables are handled in scissor state, we will not want to
221 * always emit 16 commands, one for each scissor rectangle, here.
224 SB_BEGIN_3D(so
, SHADE_MODEL
, 1);
225 SB_DATA (so
, cso
->flatshade
? NVC0_3D_SHADE_MODEL_FLAT
:
226 NVC0_3D_SHADE_MODEL_SMOOTH
);
227 SB_IMMED_3D(so
, PROVOKING_VERTEX_LAST
, !cso
->flatshade_first
);
228 SB_IMMED_3D(so
, VERTEX_TWO_SIDE_ENABLE
, cso
->light_twoside
);
230 SB_IMMED_3D(so
, VERT_COLOR_CLAMP_EN
, cso
->clamp_vertex_color
);
231 SB_BEGIN_3D(so
, FRAG_COLOR_CLAMP_EN
, 1);
232 SB_DATA (so
, cso
->clamp_fragment_color
? 0x11111111 : 0x00000000);
234 SB_IMMED_3D(so
, MULTISAMPLE_ENABLE
, cso
->multisample
);
236 SB_IMMED_3D(so
, LINE_SMOOTH_ENABLE
, cso
->line_smooth
);
237 if (cso
->line_smooth
)
238 SB_BEGIN_3D(so
, LINE_WIDTH_SMOOTH
, 1);
240 SB_BEGIN_3D(so
, LINE_WIDTH_ALIASED
, 1);
241 SB_DATA (so
, fui(cso
->line_width
));
243 SB_IMMED_3D(so
, LINE_STIPPLE_ENABLE
, cso
->line_stipple_enable
);
244 if (cso
->line_stipple_enable
) {
245 SB_BEGIN_3D(so
, LINE_STIPPLE_PATTERN
, 1);
246 SB_DATA (so
, (cso
->line_stipple_pattern
<< 8) |
247 cso
->line_stipple_factor
);
251 SB_IMMED_3D(so
, VP_POINT_SIZE_EN
, cso
->point_size_per_vertex
);
252 if (!cso
->point_size_per_vertex
) {
253 SB_BEGIN_3D(so
, POINT_SIZE
, 1);
254 SB_DATA (so
, fui(cso
->point_size
));
257 reg
= (cso
->sprite_coord_mode
== PIPE_SPRITE_COORD_UPPER_LEFT
) ?
258 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT
:
259 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT
;
261 SB_BEGIN_3D(so
, POINT_COORD_REPLACE
, 1);
262 SB_DATA (so
, ((cso
->sprite_coord_enable
& 0xff) << 3) | reg
);
263 SB_IMMED_3D(so
, POINT_SPRITE_ENABLE
, cso
->point_quad_rasterization
);
264 SB_IMMED_3D(so
, POINT_SMOOTH_ENABLE
, cso
->point_smooth
);
266 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_FRONT
, 1);
267 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_front
));
268 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_BACK
, 1);
269 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_back
));
270 SB_IMMED_3D(so
, POLYGON_SMOOTH_ENABLE
, cso
->poly_smooth
);
272 SB_BEGIN_3D(so
, CULL_FACE_ENABLE
, 3);
273 SB_DATA (so
, cso
->cull_face
!= PIPE_FACE_NONE
);
274 SB_DATA (so
, cso
->front_ccw
? NVC0_3D_FRONT_FACE_CCW
:
275 NVC0_3D_FRONT_FACE_CW
);
276 switch (cso
->cull_face
) {
277 case PIPE_FACE_FRONT_AND_BACK
:
278 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT_AND_BACK
);
280 case PIPE_FACE_FRONT
:
281 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT
);
285 SB_DATA(so
, NVC0_3D_CULL_FACE_BACK
);
289 SB_IMMED_3D(so
, POLYGON_STIPPLE_ENABLE
, cso
->poly_stipple_enable
);
290 SB_BEGIN_3D(so
, POLYGON_OFFSET_POINT_ENABLE
, 3);
291 SB_DATA (so
, cso
->offset_point
);
292 SB_DATA (so
, cso
->offset_line
);
293 SB_DATA (so
, cso
->offset_tri
);
295 if (cso
->offset_point
|| cso
->offset_line
|| cso
->offset_tri
) {
296 SB_BEGIN_3D(so
, POLYGON_OFFSET_FACTOR
, 1);
297 SB_DATA (so
, fui(cso
->offset_scale
));
298 SB_BEGIN_3D(so
, POLYGON_OFFSET_UNITS
, 1);
299 SB_DATA (so
, fui(cso
->offset_units
* 2.0f
));
300 SB_BEGIN_3D(so
, POLYGON_OFFSET_CLAMP
, 1);
301 SB_DATA (so
, fui(cso
->offset_clamp
));
305 reg
= NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
;
308 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
|
309 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR
|
310 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR
|
311 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2
;
313 SB_BEGIN_3D(so
, VIEW_VOLUME_CLIP_CTRL
, 1);
316 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
321 nvc0_rasterizer_state_bind(struct pipe_context
*pipe
, void *hwcso
)
323 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
326 nvc0
->dirty
|= NVC0_NEW_RASTERIZER
;
/* Destroy a rasterizer state object (not currently bound). */
static void
nvc0_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
336 nvc0_zsa_state_create(struct pipe_context
*pipe
,
337 const struct pipe_depth_stencil_alpha_state
*cso
)
339 struct nvc0_zsa_stateobj
*so
= CALLOC_STRUCT(nvc0_zsa_stateobj
);
343 SB_IMMED_3D(so
, DEPTH_TEST_ENABLE
, cso
->depth
.enabled
);
344 if (cso
->depth
.enabled
) {
345 SB_IMMED_3D(so
, DEPTH_WRITE_ENABLE
, cso
->depth
.writemask
);
346 SB_BEGIN_3D(so
, DEPTH_TEST_FUNC
, 1);
347 SB_DATA (so
, nvgl_comparison_op(cso
->depth
.func
));
350 if (cso
->stencil
[0].enabled
) {
351 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 5);
353 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].fail_op
));
354 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zfail_op
));
355 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zpass_op
));
356 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[0].func
));
357 SB_BEGIN_3D(so
, STENCIL_FRONT_FUNC_MASK
, 2);
358 SB_DATA (so
, cso
->stencil
[0].valuemask
);
359 SB_DATA (so
, cso
->stencil
[0].writemask
);
361 SB_IMMED_3D(so
, STENCIL_ENABLE
, 0);
364 if (cso
->stencil
[1].enabled
) {
365 assert(cso
->stencil
[0].enabled
);
366 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 5);
368 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].fail_op
));
369 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zfail_op
));
370 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zpass_op
));
371 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[1].func
));
372 SB_BEGIN_3D(so
, STENCIL_BACK_MASK
, 2);
373 SB_DATA (so
, cso
->stencil
[1].writemask
);
374 SB_DATA (so
, cso
->stencil
[1].valuemask
);
376 if (cso
->stencil
[0].enabled
) {
377 SB_IMMED_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 0);
380 SB_IMMED_3D(so
, ALPHA_TEST_ENABLE
, cso
->alpha
.enabled
);
381 if (cso
->alpha
.enabled
) {
382 SB_BEGIN_3D(so
, ALPHA_TEST_REF
, 2);
383 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
384 SB_DATA (so
, nvgl_comparison_op(cso
->alpha
.func
));
387 assert(so
->size
<= (sizeof(so
->state
) / sizeof(so
->state
[0])));
392 nvc0_zsa_state_bind(struct pipe_context
*pipe
, void *hwcso
)
394 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
397 nvc0
->dirty
|= NVC0_NEW_ZSA
;
/* Destroy a depth/stencil/alpha state object (not currently bound). */
static void
nvc0_zsa_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
406 /* ====================== SAMPLERS AND TEXTURES ================================
409 #define NV50_TSC_WRAP_CASE(n) \
410 case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
412 static INLINE
unsigned
413 nv50_tsc_wrap_mode(unsigned wrap
)
416 NV50_TSC_WRAP_CASE(REPEAT
);
417 NV50_TSC_WRAP_CASE(MIRROR_REPEAT
);
418 NV50_TSC_WRAP_CASE(CLAMP_TO_EDGE
);
419 NV50_TSC_WRAP_CASE(CLAMP_TO_BORDER
);
420 NV50_TSC_WRAP_CASE(CLAMP
);
421 NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_EDGE
);
422 NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_BORDER
);
423 NV50_TSC_WRAP_CASE(MIRROR_CLAMP
);
425 NOUVEAU_ERR("unknown wrap mode: %d\n", wrap
);
426 return NV50_TSC_WRAP_REPEAT
;
431 nvc0_sampler_state_delete(struct pipe_context
*pipe
, void *hwcso
)
435 for (s
= 0; s
< 5; ++s
)
436 for (i
= 0; i
< nvc0_context(pipe
)->num_samplers
[s
]; ++i
)
437 if (nvc0_context(pipe
)->samplers
[s
][i
] == hwcso
)
438 nvc0_context(pipe
)->samplers
[s
][i
] = NULL
;
440 nvc0_screen_tsc_free(nvc0_context(pipe
)->screen
, nv50_tsc_entry(hwcso
));
446 nvc0_stage_sampler_states_bind(struct nvc0_context
*nvc0
, int s
,
447 unsigned nr
, void **hwcso
)
451 for (i
= 0; i
< nr
; ++i
) {
452 struct nv50_tsc_entry
*old
= nvc0
->samplers
[s
][i
];
456 nvc0
->samplers_dirty
[s
] |= 1 << i
;
458 nvc0
->samplers
[s
][i
] = nv50_tsc_entry(hwcso
[i
]);
460 nvc0_screen_tsc_unlock(nvc0
->screen
, old
);
462 for (; i
< nvc0
->num_samplers
[s
]; ++i
) {
463 if (nvc0
->samplers
[s
][i
]) {
464 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
465 nvc0
->samplers
[s
][i
] = NULL
;
469 nvc0
->num_samplers
[s
] = nr
;
471 nvc0
->dirty
|= NVC0_NEW_SAMPLERS
;
/* Vertex shader samplers live in stage slot 0. */
static void
nvc0_vp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 0, nr, s);
}
/* Fragment shader samplers live in stage slot 4. */
static void
nvc0_fp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 4, nr, s);
}
/* Geometry shader samplers live in stage slot 3. */
static void
nvc0_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 3, nr, s);
}
493 nvc0_stage_sampler_states_bind_range(struct nvc0_context
*nvc0
,
495 unsigned start
, unsigned nr
, void **cso
)
497 const unsigned end
= start
+ nr
;
502 for (i
= start
; i
< end
; ++i
) {
503 const unsigned p
= i
- start
;
506 if (cso
[p
] == nvc0
->samplers
[s
][i
])
508 nvc0
->samplers_dirty
[s
] |= 1 << i
;
510 if (nvc0
->samplers
[s
][i
])
511 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
512 nvc0
->samplers
[s
][i
] = cso
[p
];
515 for (i
= start
; i
< end
; ++i
) {
516 if (nvc0
->samplers
[s
][i
]) {
517 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
518 nvc0
->samplers
[s
][i
] = NULL
;
519 nvc0
->samplers_dirty
[s
] |= 1 << i
;
524 if (nvc0
->num_samplers
[s
] <= end
) {
525 if (last_valid
< 0) {
526 for (i
= start
; i
&& !nvc0
->samplers
[s
][i
- 1]; --i
);
527 nvc0
->num_samplers
[s
] = i
;
529 nvc0
->num_samplers
[s
] = last_valid
+ 1;
535 nvc0_cp_sampler_states_bind(struct pipe_context
*pipe
,
536 unsigned start
, unsigned nr
, void **cso
)
538 nvc0_stage_sampler_states_bind_range(nvc0_context(pipe
), 5, start
, nr
, cso
);
540 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SAMPLERS
;
544 nvc0_bind_sampler_states(struct pipe_context
*pipe
, unsigned shader
,
545 unsigned start
, unsigned nr
, void **s
)
548 case PIPE_SHADER_VERTEX
:
550 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 0, nr
, s
);
552 case PIPE_SHADER_GEOMETRY
:
554 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 3, nr
, s
);
556 case PIPE_SHADER_FRAGMENT
:
558 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 4, nr
, s
);
560 case PIPE_SHADER_COMPUTE
:
561 nvc0_stage_sampler_states_bind_range(nvc0_context(pipe
), 5,
563 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SAMPLERS
;
569 /* NOTE: only called when not referenced anywhere, won't be bound */
571 nvc0_sampler_view_destroy(struct pipe_context
*pipe
,
572 struct pipe_sampler_view
*view
)
574 pipe_resource_reference(&view
->texture
, NULL
);
576 nvc0_screen_tic_free(nvc0_context(pipe
)->screen
, nv50_tic_entry(view
));
578 FREE(nv50_tic_entry(view
));
582 nvc0_stage_set_sampler_views(struct nvc0_context
*nvc0
, int s
,
584 struct pipe_sampler_view
**views
)
588 for (i
= 0; i
< nr
; ++i
) {
589 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
591 if (views
[i
] == nvc0
->textures
[s
][i
])
593 nvc0
->textures_dirty
[s
] |= 1 << i
;
596 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
597 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
600 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], views
[i
]);
603 for (i
= nr
; i
< nvc0
->num_textures
[s
]; ++i
) {
604 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
606 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
607 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
608 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
612 nvc0
->num_textures
[s
] = nr
;
614 nvc0
->dirty
|= NVC0_NEW_TEXTURES
;
/* Vertex shader sampler views live in stage slot 0. */
static void
nvc0_vp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 0, nr, views);
}
/* Fragment shader sampler views live in stage slot 4. */
static void
nvc0_fp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 4, nr, views);
}
/* Geometry shader sampler views live in stage slot 3. */
static void
nvc0_gp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 3, nr, views);
}
642 nvc0_stage_set_sampler_views_range(struct nvc0_context
*nvc0
, const unsigned s
,
643 unsigned start
, unsigned nr
,
644 struct pipe_sampler_view
**views
)
646 struct nouveau_bufctx
*bctx
= (s
== 5) ? nvc0
->bufctx_cp
: nvc0
->bufctx_3d
;
647 const unsigned end
= start
+ nr
;
648 const unsigned bin
= (s
== 5) ? NVC0_BIND_CP_TEX(0) : NVC0_BIND_TEX(s
, 0);
653 for (i
= start
; i
< end
; ++i
) {
654 const unsigned p
= i
- start
;
657 if (views
[p
] == nvc0
->textures
[s
][i
])
659 nvc0
->textures_dirty
[s
] |= 1 << i
;
661 if (nvc0
->textures
[s
][i
]) {
662 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
663 nouveau_bufctx_reset(bctx
, bin
+ i
);
664 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
666 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], views
[p
]);
669 for (i
= start
; i
< end
; ++i
) {
670 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
673 nvc0
->textures_dirty
[s
] |= 1 << i
;
675 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
676 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
677 nouveau_bufctx_reset(bctx
, bin
+ i
);
681 if (nvc0
->num_textures
[s
] <= end
) {
682 if (last_valid
< 0) {
683 for (i
= start
; i
&& !nvc0
->textures
[s
][i
- 1]; --i
);
684 nvc0
->num_textures
[s
] = i
;
686 nvc0
->num_textures
[s
] = last_valid
+ 1;
692 nvc0_cp_set_sampler_views(struct pipe_context
*pipe
,
693 unsigned start
, unsigned nr
,
694 struct pipe_sampler_view
**views
)
696 nvc0_stage_set_sampler_views_range(nvc0_context(pipe
), 5, start
, nr
, views
);
698 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_TEXTURES
;
702 /* ============================= SHADERS =======================================
706 nvc0_sp_state_create(struct pipe_context
*pipe
,
707 const struct pipe_shader_state
*cso
, unsigned type
)
709 struct nvc0_program
*prog
;
711 prog
= CALLOC_STRUCT(nvc0_program
);
718 prog
->pipe
.tokens
= tgsi_dup_tokens(cso
->tokens
);
720 if (cso
->stream_output
.num_outputs
)
721 prog
->pipe
.stream_output
= cso
->stream_output
;
727 nvc0_sp_state_delete(struct pipe_context
*pipe
, void *hwcso
)
729 struct nvc0_program
*prog
= (struct nvc0_program
*)hwcso
;
731 nvc0_program_destroy(nvc0_context(pipe
), prog
);
733 FREE((void *)prog
->pipe
.tokens
);
738 nvc0_vp_state_create(struct pipe_context
*pipe
,
739 const struct pipe_shader_state
*cso
)
741 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_VERTEX
);
745 nvc0_vp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
747 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
749 nvc0
->vertprog
= hwcso
;
750 nvc0
->dirty
|= NVC0_NEW_VERTPROG
;
754 nvc0_fp_state_create(struct pipe_context
*pipe
,
755 const struct pipe_shader_state
*cso
)
757 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_FRAGMENT
);
761 nvc0_fp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
763 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
765 nvc0
->fragprog
= hwcso
;
766 nvc0
->dirty
|= NVC0_NEW_FRAGPROG
;
770 nvc0_gp_state_create(struct pipe_context
*pipe
,
771 const struct pipe_shader_state
*cso
)
773 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_GEOMETRY
);
777 nvc0_gp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
779 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
781 nvc0
->gmtyprog
= hwcso
;
782 nvc0
->dirty
|= NVC0_NEW_GMTYPROG
;
786 nvc0_cp_state_create(struct pipe_context
*pipe
,
787 const struct pipe_compute_state
*cso
)
789 struct nvc0_program
*prog
;
791 prog
= CALLOC_STRUCT(nvc0_program
);
794 prog
->type
= PIPE_SHADER_COMPUTE
;
796 prog
->cp
.smem_size
= cso
->req_local_mem
;
797 prog
->cp
.lmem_size
= cso
->req_private_mem
;
798 prog
->parm_size
= cso
->req_input_mem
;
800 prog
->pipe
.tokens
= tgsi_dup_tokens((const struct tgsi_token
*)cso
->prog
);
806 nvc0_cp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
808 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
810 nvc0
->compprog
= hwcso
;
811 nvc0
->dirty_cp
|= NVC0_NEW_CP_PROGRAM
;
815 nvc0_set_constant_buffer(struct pipe_context
*pipe
, uint shader
, uint index
,
816 struct pipe_constant_buffer
*cb
)
818 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
819 struct pipe_resource
*res
= cb
? cb
->buffer
: NULL
;
820 const unsigned s
= nvc0_shader_stage(shader
);
821 const unsigned i
= index
;
823 if (unlikely(shader
== PIPE_SHADER_COMPUTE
)) {
824 assert(!cb
|| !cb
->user_buffer
);
825 if (nvc0
->constbuf
[s
][i
].u
.buf
)
826 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_CB(i
));
828 nvc0
->dirty_cp
|= NVC0_NEW_CP_CONSTBUF
;
830 if (nvc0
->constbuf
[s
][i
].user
)
831 nvc0
->constbuf
[s
][i
].u
.buf
= NULL
;
833 if (nvc0
->constbuf
[s
][i
].u
.buf
)
834 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_CB(s
, i
));
836 nvc0
->dirty
|= NVC0_NEW_CONSTBUF
;
838 nvc0
->constbuf_dirty
[s
] |= 1 << i
;
840 pipe_resource_reference(&nvc0
->constbuf
[s
][i
].u
.buf
, res
);
842 nvc0
->constbuf
[s
][i
].user
= (cb
&& cb
->user_buffer
) ? TRUE
: FALSE
;
843 if (nvc0
->constbuf
[s
][i
].user
) {
844 nvc0
->constbuf
[s
][i
].u
.data
= cb
->user_buffer
;
845 nvc0
->constbuf
[s
][i
].size
= cb
->buffer_size
;
848 nvc0
->constbuf
[s
][i
].offset
= cb
->buffer_offset
;
849 nvc0
->constbuf
[s
][i
].size
= align(cb
->buffer_size
, 0x100);
853 /* =============================================================================
857 nvc0_set_blend_color(struct pipe_context
*pipe
,
858 const struct pipe_blend_color
*bcol
)
860 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
862 nvc0
->blend_colour
= *bcol
;
863 nvc0
->dirty
|= NVC0_NEW_BLEND_COLOUR
;
867 nvc0_set_stencil_ref(struct pipe_context
*pipe
,
868 const struct pipe_stencil_ref
*sr
)
870 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
872 nvc0
->stencil_ref
= *sr
;
873 nvc0
->dirty
|= NVC0_NEW_STENCIL_REF
;
877 nvc0_set_clip_state(struct pipe_context
*pipe
,
878 const struct pipe_clip_state
*clip
)
880 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
882 memcpy(nvc0
->clip
.ucp
, clip
->ucp
, sizeof(clip
->ucp
));
884 nvc0
->dirty
|= NVC0_NEW_CLIP
;
888 nvc0_set_sample_mask(struct pipe_context
*pipe
, unsigned sample_mask
)
890 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
892 nvc0
->sample_mask
= sample_mask
;
893 nvc0
->dirty
|= NVC0_NEW_SAMPLE_MASK
;
898 nvc0_set_framebuffer_state(struct pipe_context
*pipe
,
899 const struct pipe_framebuffer_state
*fb
)
901 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
904 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_FB
);
906 for (i
= 0; i
< fb
->nr_cbufs
; ++i
)
907 pipe_surface_reference(&nvc0
->framebuffer
.cbufs
[i
], fb
->cbufs
[i
]);
908 for (; i
< nvc0
->framebuffer
.nr_cbufs
; ++i
)
909 pipe_surface_reference(&nvc0
->framebuffer
.cbufs
[i
], NULL
);
911 nvc0
->framebuffer
.nr_cbufs
= fb
->nr_cbufs
;
913 nvc0
->framebuffer
.width
= fb
->width
;
914 nvc0
->framebuffer
.height
= fb
->height
;
916 pipe_surface_reference(&nvc0
->framebuffer
.zsbuf
, fb
->zsbuf
);
918 nvc0
->dirty
|= NVC0_NEW_FRAMEBUFFER
;
922 nvc0_set_polygon_stipple(struct pipe_context
*pipe
,
923 const struct pipe_poly_stipple
*stipple
)
925 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
927 nvc0
->stipple
= *stipple
;
928 nvc0
->dirty
|= NVC0_NEW_STIPPLE
;
932 nvc0_set_scissor_states(struct pipe_context
*pipe
,
934 unsigned num_scissors
,
935 const struct pipe_scissor_state
*scissor
)
937 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
939 nvc0
->scissor
= *scissor
;
940 nvc0
->dirty
|= NVC0_NEW_SCISSOR
;
944 nvc0_set_viewport_states(struct pipe_context
*pipe
,
946 unsigned num_viewports
,
947 const struct pipe_viewport_state
*vpt
)
949 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
951 nvc0
->viewport
= *vpt
;
952 nvc0
->dirty
|= NVC0_NEW_VIEWPORT
;
956 nvc0_set_vertex_buffers(struct pipe_context
*pipe
,
957 unsigned start_slot
, unsigned count
,
958 const struct pipe_vertex_buffer
*vb
)
960 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
963 util_set_vertex_buffers_count(nvc0
->vtxbuf
, &nvc0
->num_vtxbufs
, vb
,
967 nvc0
->vbo_user
&= ~(((1ull << count
) - 1) << start_slot
);
968 nvc0
->constant_vbos
&= ~(((1ull << count
) - 1) << start_slot
);
972 for (i
= 0; i
< count
; ++i
) {
973 unsigned dst_index
= start_slot
+ i
;
975 if (vb
[i
].user_buffer
) {
976 nvc0
->vbo_user
|= 1 << dst_index
;
978 nvc0
->constant_vbos
|= 1 << dst_index
;
980 nvc0
->constant_vbos
&= ~(1 << dst_index
);
982 nvc0
->vbo_user
&= ~(1 << dst_index
);
983 nvc0
->constant_vbos
&= ~(1 << dst_index
);
987 nvc0
->dirty
|= NVC0_NEW_ARRAYS
;
988 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_VTX
);
992 nvc0_set_index_buffer(struct pipe_context
*pipe
,
993 const struct pipe_index_buffer
*ib
)
995 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
997 if (nvc0
->idxbuf
.buffer
)
998 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_IDX
);
1001 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, ib
->buffer
);
1002 nvc0
->idxbuf
.index_size
= ib
->index_size
;
1004 nvc0
->idxbuf
.offset
= ib
->offset
;
1005 nvc0
->dirty
|= NVC0_NEW_IDXBUF
;
1007 nvc0
->idxbuf
.user_buffer
= ib
->user_buffer
;
1008 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
1011 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
1012 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, NULL
);
1017 nvc0_vertex_state_bind(struct pipe_context
*pipe
, void *hwcso
)
1019 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1021 nvc0
->vertex
= hwcso
;
1022 nvc0
->dirty
|= NVC0_NEW_VERTEX
;
1025 static struct pipe_stream_output_target
*
1026 nvc0_so_target_create(struct pipe_context
*pipe
,
1027 struct pipe_resource
*res
,
1028 unsigned offset
, unsigned size
)
1030 struct nvc0_so_target
*targ
= MALLOC_STRUCT(nvc0_so_target
);
1034 targ
->pq
= pipe
->create_query(pipe
, NVC0_QUERY_TFB_BUFFER_OFFSET
);
1041 targ
->pipe
.buffer_size
= size
;
1042 targ
->pipe
.buffer_offset
= offset
;
1043 targ
->pipe
.context
= pipe
;
1044 targ
->pipe
.buffer
= NULL
;
1045 pipe_resource_reference(&targ
->pipe
.buffer
, res
);
1046 pipe_reference_init(&targ
->pipe
.reference
, 1);
1052 nvc0_so_target_destroy(struct pipe_context
*pipe
,
1053 struct pipe_stream_output_target
*ptarg
)
1055 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
1056 pipe
->destroy_query(pipe
, targ
->pq
);
1057 pipe_resource_reference(&targ
->pipe
.buffer
, NULL
);
1062 nvc0_set_transform_feedback_targets(struct pipe_context
*pipe
,
1063 unsigned num_targets
,
1064 struct pipe_stream_output_target
**targets
,
1065 unsigned append_mask
)
1067 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1069 boolean serialize
= TRUE
;
1071 assert(num_targets
<= 4);
1073 for (i
= 0; i
< num_targets
; ++i
) {
1074 if (nvc0
->tfbbuf
[i
] == targets
[i
] && (append_mask
& (1 << i
)))
1076 nvc0
->tfbbuf_dirty
|= 1 << i
;
1078 if (nvc0
->tfbbuf
[i
] && nvc0
->tfbbuf
[i
] != targets
[i
])
1079 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1081 if (targets
[i
] && !(append_mask
& (1 << i
)))
1082 nvc0_so_target(targets
[i
])->clean
= TRUE
;
1084 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], targets
[i
]);
1086 for (; i
< nvc0
->num_tfbbufs
; ++i
) {
1087 nvc0
->tfbbuf_dirty
|= 1 << i
;
1088 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1089 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], NULL
);
1091 nvc0
->num_tfbbufs
= num_targets
;
1093 if (nvc0
->tfbbuf_dirty
)
1094 nvc0
->dirty
|= NVC0_NEW_TFB_TARGETS
;
1098 nvc0_bind_surfaces_range(struct nvc0_context
*nvc0
, const unsigned t
,
1099 unsigned start
, unsigned nr
,
1100 struct pipe_surface
**psurfaces
)
1102 const unsigned end
= start
+ nr
;
1103 const unsigned mask
= ((1 << nr
) - 1) << start
;
1107 for (i
= start
; i
< end
; ++i
) {
1108 const unsigned p
= i
- start
;
1110 nvc0
->surfaces_valid
[t
] |= (1 << i
);
1112 nvc0
->surfaces_valid
[t
] &= ~(1 << i
);
1113 pipe_surface_reference(&nvc0
->surfaces
[t
][i
], psurfaces
[p
]);
1116 for (i
= start
; i
< end
; ++i
)
1117 pipe_surface_reference(&nvc0
->surfaces
[t
][i
], NULL
);
1118 nvc0
->surfaces_valid
[t
] &= ~mask
;
1120 nvc0
->surfaces_dirty
[t
] |= mask
;
1123 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_SUF
);
1125 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_SUF
);
1129 nvc0_set_compute_resources(struct pipe_context
*pipe
,
1130 unsigned start
, unsigned nr
,
1131 struct pipe_surface
**resources
)
1133 nvc0_bind_surfaces_range(nvc0_context(pipe
), 1, start
, nr
, resources
);
1135 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SURFACES
;
1139 nvc0_set_shader_resources(struct pipe_context
*pipe
,
1140 unsigned start
, unsigned nr
,
1141 struct pipe_surface
**resources
)
1143 nvc0_bind_surfaces_range(nvc0_context(pipe
), 0, start
, nr
, resources
);
1145 nvc0_context(pipe
)->dirty
|= NVC0_NEW_SURFACES
;
1149 nvc0_set_global_handle(uint32_t *phandle
, struct pipe_resource
*res
)
1151 struct nv04_resource
*buf
= nv04_resource(res
);
1153 uint64_t limit
= (buf
->address
+ buf
->base
.width0
) - 1;
1154 if (limit
< (1ULL << 32)) {
1155 *phandle
= (uint32_t)buf
->address
;
1157 NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
1158 "resource not contained within 32-bit address space !\n");
1167 nvc0_set_global_bindings(struct pipe_context
*pipe
,
1168 unsigned start
, unsigned nr
,
1169 struct pipe_resource
**resources
,
1172 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1173 struct pipe_resource
**ptr
;
1175 const unsigned end
= start
+ nr
;
1177 if (nvc0
->global_residents
.size
<= (end
* sizeof(struct pipe_resource
*))) {
1178 const unsigned old_size
= nvc0
->global_residents
.size
;
1179 const unsigned req_size
= end
* sizeof(struct pipe_resource
*);
1180 util_dynarray_resize(&nvc0
->global_residents
, req_size
);
1181 memset((uint8_t *)nvc0
->global_residents
.data
+ old_size
, 0,
1182 req_size
- old_size
);
1186 ptr
= util_dynarray_element(
1187 &nvc0
->global_residents
, struct pipe_resource
*, start
);
1188 for (i
= 0; i
< nr
; ++i
) {
1189 pipe_resource_reference(&ptr
[i
], resources
[i
]);
1190 nvc0_set_global_handle(handles
[i
], resources
[i
]);
1193 ptr
= util_dynarray_element(
1194 &nvc0
->global_residents
, struct pipe_resource
*, start
);
1195 for (i
= 0; i
< nr
; ++i
)
1196 pipe_resource_reference(&ptr
[i
], NULL
);
1199 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_GLOBAL
);
1201 nvc0
->dirty_cp
= NVC0_NEW_CP_GLOBALS
;
1205 nvc0_init_state_functions(struct nvc0_context
*nvc0
)
1207 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
1209 pipe
->create_blend_state
= nvc0_blend_state_create
;
1210 pipe
->bind_blend_state
= nvc0_blend_state_bind
;
1211 pipe
->delete_blend_state
= nvc0_blend_state_delete
;
1213 pipe
->create_rasterizer_state
= nvc0_rasterizer_state_create
;
1214 pipe
->bind_rasterizer_state
= nvc0_rasterizer_state_bind
;
1215 pipe
->delete_rasterizer_state
= nvc0_rasterizer_state_delete
;
1217 pipe
->create_depth_stencil_alpha_state
= nvc0_zsa_state_create
;
1218 pipe
->bind_depth_stencil_alpha_state
= nvc0_zsa_state_bind
;
1219 pipe
->delete_depth_stencil_alpha_state
= nvc0_zsa_state_delete
;
1221 pipe
->create_sampler_state
= nv50_sampler_state_create
;
1222 pipe
->delete_sampler_state
= nvc0_sampler_state_delete
;
1223 pipe
->bind_sampler_states
= nvc0_bind_sampler_states
;
1224 pipe
->bind_vertex_sampler_states
= nvc0_vp_sampler_states_bind
;
1225 pipe
->bind_fragment_sampler_states
= nvc0_fp_sampler_states_bind
;
1226 pipe
->bind_geometry_sampler_states
= nvc0_gp_sampler_states_bind
;
1227 pipe
->bind_compute_sampler_states
= nvc0_cp_sampler_states_bind
;
1229 pipe
->create_sampler_view
= nvc0_create_sampler_view
;
1230 pipe
->sampler_view_destroy
= nvc0_sampler_view_destroy
;
1231 pipe
->set_vertex_sampler_views
= nvc0_vp_set_sampler_views
;
1232 pipe
->set_fragment_sampler_views
= nvc0_fp_set_sampler_views
;
1233 pipe
->set_geometry_sampler_views
= nvc0_gp_set_sampler_views
;
1234 pipe
->set_compute_sampler_views
= nvc0_cp_set_sampler_views
;
1236 pipe
->create_vs_state
= nvc0_vp_state_create
;
1237 pipe
->create_fs_state
= nvc0_fp_state_create
;
1238 pipe
->create_gs_state
= nvc0_gp_state_create
;
1239 pipe
->bind_vs_state
= nvc0_vp_state_bind
;
1240 pipe
->bind_fs_state
= nvc0_fp_state_bind
;
1241 pipe
->bind_gs_state
= nvc0_gp_state_bind
;
1242 pipe
->delete_vs_state
= nvc0_sp_state_delete
;
1243 pipe
->delete_fs_state
= nvc0_sp_state_delete
;
1244 pipe
->delete_gs_state
= nvc0_sp_state_delete
;
1246 pipe
->create_compute_state
= nvc0_cp_state_create
;
1247 pipe
->bind_compute_state
= nvc0_cp_state_bind
;
1248 pipe
->delete_compute_state
= nvc0_sp_state_delete
;
1250 pipe
->set_blend_color
= nvc0_set_blend_color
;
1251 pipe
->set_stencil_ref
= nvc0_set_stencil_ref
;
1252 pipe
->set_clip_state
= nvc0_set_clip_state
;
1253 pipe
->set_sample_mask
= nvc0_set_sample_mask
;
1254 pipe
->set_constant_buffer
= nvc0_set_constant_buffer
;
1255 pipe
->set_framebuffer_state
= nvc0_set_framebuffer_state
;
1256 pipe
->set_polygon_stipple
= nvc0_set_polygon_stipple
;
1257 pipe
->set_scissor_states
= nvc0_set_scissor_states
;
1258 pipe
->set_viewport_states
= nvc0_set_viewport_states
;
1260 pipe
->create_vertex_elements_state
= nvc0_vertex_state_create
;
1261 pipe
->delete_vertex_elements_state
= nvc0_vertex_state_delete
;
1262 pipe
->bind_vertex_elements_state
= nvc0_vertex_state_bind
;
1264 pipe
->set_vertex_buffers
= nvc0_set_vertex_buffers
;
1265 pipe
->set_index_buffer
= nvc0_set_index_buffer
;
1267 pipe
->create_stream_output_target
= nvc0_so_target_create
;
1268 pipe
->stream_output_target_destroy
= nvc0_so_target_destroy
;
1269 pipe
->set_stream_output_targets
= nvc0_set_transform_feedback_targets
;
1271 pipe
->set_global_binding
= nvc0_set_global_bindings
;
1272 pipe
->set_compute_resources
= nvc0_set_compute_resources
;
1273 pipe
->set_shader_resources
= nvc0_set_shader_resources
;