/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"

#include "tgsi/tgsi_parse.h"

#include "nvc0/nvc0_stateobj.h"
#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_query_hw.h"

#include "nvc0/nvc0_3d.xml.h"
#include "nv50/nv50_texture.xml.h"

#include "nouveau_gldefs.h"
40 static inline uint32_t
41 nvc0_colormask(unsigned mask
)
45 if (mask
& PIPE_MASK_R
)
47 if (mask
& PIPE_MASK_G
)
49 if (mask
& PIPE_MASK_B
)
51 if (mask
& PIPE_MASK_A
)
57 #define NVC0_BLEND_FACTOR_CASE(a, b) \
58 case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b
60 static inline uint32_t
61 nvc0_blend_fac(unsigned factor
)
64 NVC0_BLEND_FACTOR_CASE(ONE
, ONE
);
65 NVC0_BLEND_FACTOR_CASE(SRC_COLOR
, SRC_COLOR
);
66 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA
, SRC_ALPHA
);
67 NVC0_BLEND_FACTOR_CASE(DST_ALPHA
, DST_ALPHA
);
68 NVC0_BLEND_FACTOR_CASE(DST_COLOR
, DST_COLOR
);
69 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE
, SRC_ALPHA_SATURATE
);
70 NVC0_BLEND_FACTOR_CASE(CONST_COLOR
, CONSTANT_COLOR
);
71 NVC0_BLEND_FACTOR_CASE(CONST_ALPHA
, CONSTANT_ALPHA
);
72 NVC0_BLEND_FACTOR_CASE(SRC1_COLOR
, SRC1_COLOR
);
73 NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA
, SRC1_ALPHA
);
74 NVC0_BLEND_FACTOR_CASE(ZERO
, ZERO
);
75 NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR
, ONE_MINUS_SRC_COLOR
);
76 NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA
, ONE_MINUS_SRC_ALPHA
);
77 NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA
, ONE_MINUS_DST_ALPHA
);
78 NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR
, ONE_MINUS_DST_COLOR
);
79 NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR
, ONE_MINUS_CONSTANT_COLOR
);
80 NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA
, ONE_MINUS_CONSTANT_ALPHA
);
81 NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR
, ONE_MINUS_SRC1_COLOR
);
82 NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA
, ONE_MINUS_SRC1_ALPHA
);
84 return NV50_BLEND_FACTOR_ZERO
;
89 nvc0_blend_state_create(struct pipe_context
*pipe
,
90 const struct pipe_blend_state
*cso
)
92 struct nvc0_blend_stateobj
*so
= CALLOC_STRUCT(nvc0_blend_stateobj
);
94 int r
; /* reference */
96 bool indep_masks
= false;
97 bool indep_funcs
= false;
101 /* check which states actually have differing values */
102 if (cso
->independent_blend_enable
) {
103 for (r
= 0; r
< 8 && !cso
->rt
[r
].blend_enable
; ++r
);
105 for (i
= r
+ 1; i
< 8; ++i
) {
106 if (!cso
->rt
[i
].blend_enable
)
109 if (cso
->rt
[i
].rgb_func
!= cso
->rt
[r
].rgb_func
||
110 cso
->rt
[i
].rgb_src_factor
!= cso
->rt
[r
].rgb_src_factor
||
111 cso
->rt
[i
].rgb_dst_factor
!= cso
->rt
[r
].rgb_dst_factor
||
112 cso
->rt
[i
].alpha_func
!= cso
->rt
[r
].alpha_func
||
113 cso
->rt
[i
].alpha_src_factor
!= cso
->rt
[r
].alpha_src_factor
||
114 cso
->rt
[i
].alpha_dst_factor
!= cso
->rt
[r
].alpha_dst_factor
) {
120 blend_en
|= (cso
->rt
[i
].blend_enable
? 1 : 0) << i
;
122 for (i
= 1; i
< 8; ++i
) {
123 if (cso
->rt
[i
].colormask
!= cso
->rt
[0].colormask
) {
130 if (cso
->rt
[0].blend_enable
)
134 if (cso
->logicop_enable
) {
135 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 2);
137 SB_DATA (so
, nvgl_logicop_func(cso
->logicop_func
));
139 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, 0);
141 SB_IMMED_3D(so
, LOGIC_OP_ENABLE
, 0);
143 SB_IMMED_3D(so
, BLEND_INDEPENDENT
, indep_funcs
);
144 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, blend_en
);
146 for (i
= 0; i
< 8; ++i
) {
147 if (cso
->rt
[i
].blend_enable
) {
148 SB_BEGIN_3D(so
, IBLEND_EQUATION_RGB(i
), 6);
149 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].rgb_func
));
150 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_src_factor
));
151 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_dst_factor
));
152 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].alpha_func
));
153 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_src_factor
));
154 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_dst_factor
));
159 SB_BEGIN_3D(so
, BLEND_EQUATION_RGB
, 5);
160 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].rgb_func
));
161 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_src_factor
));
162 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_dst_factor
));
163 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].alpha_func
));
164 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_src_factor
));
165 SB_BEGIN_3D(so
, BLEND_FUNC_DST_ALPHA
, 1);
166 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_dst_factor
));
169 SB_IMMED_3D(so
, COLOR_MASK_COMMON
, !indep_masks
);
171 SB_BEGIN_3D(so
, COLOR_MASK(0), 8);
172 for (i
= 0; i
< 8; ++i
)
173 SB_DATA(so
, nvc0_colormask(cso
->rt
[i
].colormask
));
175 SB_BEGIN_3D(so
, COLOR_MASK(0), 1);
176 SB_DATA (so
, nvc0_colormask(cso
->rt
[0].colormask
));
180 assert(so
->size
<= ARRAY_SIZE(so
->state
));
185 nvc0_blend_state_bind(struct pipe_context
*pipe
, void *hwcso
)
187 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
190 nvc0
->dirty
|= NVC0_NEW_BLEND
;
194 nvc0_blend_state_delete(struct pipe_context
*pipe
, void *hwcso
)
199 /* NOTE: ignoring line_last_pixel */
201 nvc0_rasterizer_state_create(struct pipe_context
*pipe
,
202 const struct pipe_rasterizer_state
*cso
)
204 struct nvc0_rasterizer_stateobj
*so
;
207 so
= CALLOC_STRUCT(nvc0_rasterizer_stateobj
);
212 /* Scissor enables are handled in scissor state, we will not want to
213 * always emit 16 commands, one for each scissor rectangle, here.
216 SB_IMMED_3D(so
, PROVOKING_VERTEX_LAST
, !cso
->flatshade_first
);
217 SB_IMMED_3D(so
, VERTEX_TWO_SIDE_ENABLE
, cso
->light_twoside
);
219 SB_IMMED_3D(so
, VERT_COLOR_CLAMP_EN
, cso
->clamp_vertex_color
);
220 SB_BEGIN_3D(so
, FRAG_COLOR_CLAMP_EN
, 1);
221 SB_DATA (so
, cso
->clamp_fragment_color
? 0x11111111 : 0x00000000);
223 SB_IMMED_3D(so
, MULTISAMPLE_ENABLE
, cso
->multisample
);
225 SB_IMMED_3D(so
, LINE_SMOOTH_ENABLE
, cso
->line_smooth
);
226 if (cso
->line_smooth
|| cso
->multisample
)
227 SB_BEGIN_3D(so
, LINE_WIDTH_SMOOTH
, 1);
229 SB_BEGIN_3D(so
, LINE_WIDTH_ALIASED
, 1);
230 SB_DATA (so
, fui(cso
->line_width
));
232 SB_IMMED_3D(so
, LINE_STIPPLE_ENABLE
, cso
->line_stipple_enable
);
233 if (cso
->line_stipple_enable
) {
234 SB_BEGIN_3D(so
, LINE_STIPPLE_PATTERN
, 1);
235 SB_DATA (so
, (cso
->line_stipple_pattern
<< 8) |
236 cso
->line_stipple_factor
);
240 SB_IMMED_3D(so
, VP_POINT_SIZE
, cso
->point_size_per_vertex
);
241 if (!cso
->point_size_per_vertex
) {
242 SB_BEGIN_3D(so
, POINT_SIZE
, 1);
243 SB_DATA (so
, fui(cso
->point_size
));
246 reg
= (cso
->sprite_coord_mode
== PIPE_SPRITE_COORD_UPPER_LEFT
) ?
247 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT
:
248 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT
;
250 SB_BEGIN_3D(so
, POINT_COORD_REPLACE
, 1);
251 SB_DATA (so
, ((cso
->sprite_coord_enable
& 0xff) << 3) | reg
);
252 SB_IMMED_3D(so
, POINT_SPRITE_ENABLE
, cso
->point_quad_rasterization
);
253 SB_IMMED_3D(so
, POINT_SMOOTH_ENABLE
, cso
->point_smooth
);
255 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_FRONT
, 1);
256 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_front
));
257 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_BACK
, 1);
258 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_back
));
259 SB_IMMED_3D(so
, POLYGON_SMOOTH_ENABLE
, cso
->poly_smooth
);
261 SB_BEGIN_3D(so
, CULL_FACE_ENABLE
, 3);
262 SB_DATA (so
, cso
->cull_face
!= PIPE_FACE_NONE
);
263 SB_DATA (so
, cso
->front_ccw
? NVC0_3D_FRONT_FACE_CCW
:
264 NVC0_3D_FRONT_FACE_CW
);
265 switch (cso
->cull_face
) {
266 case PIPE_FACE_FRONT_AND_BACK
:
267 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT_AND_BACK
);
269 case PIPE_FACE_FRONT
:
270 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT
);
274 SB_DATA(so
, NVC0_3D_CULL_FACE_BACK
);
278 SB_IMMED_3D(so
, POLYGON_STIPPLE_ENABLE
, cso
->poly_stipple_enable
);
279 SB_BEGIN_3D(so
, POLYGON_OFFSET_POINT_ENABLE
, 3);
280 SB_DATA (so
, cso
->offset_point
);
281 SB_DATA (so
, cso
->offset_line
);
282 SB_DATA (so
, cso
->offset_tri
);
284 if (cso
->offset_point
|| cso
->offset_line
|| cso
->offset_tri
) {
285 SB_BEGIN_3D(so
, POLYGON_OFFSET_FACTOR
, 1);
286 SB_DATA (so
, fui(cso
->offset_scale
));
287 SB_BEGIN_3D(so
, POLYGON_OFFSET_UNITS
, 1);
288 SB_DATA (so
, fui(cso
->offset_units
* 2.0f
));
289 SB_BEGIN_3D(so
, POLYGON_OFFSET_CLAMP
, 1);
290 SB_DATA (so
, fui(cso
->offset_clamp
));
294 reg
= NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
;
297 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
|
298 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR
|
299 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR
|
300 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2
;
302 SB_BEGIN_3D(so
, VIEW_VOLUME_CLIP_CTRL
, 1);
305 SB_IMMED_3D(so
, DEPTH_CLIP_NEGATIVE_Z
, cso
->clip_halfz
);
307 SB_IMMED_3D(so
, PIXEL_CENTER_INTEGER
, !cso
->half_pixel_center
);
309 assert(so
->size
<= ARRAY_SIZE(so
->state
));
314 nvc0_rasterizer_state_bind(struct pipe_context
*pipe
, void *hwcso
)
316 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
319 nvc0
->dirty
|= NVC0_NEW_RASTERIZER
;
323 nvc0_rasterizer_state_delete(struct pipe_context
*pipe
, void *hwcso
)
329 nvc0_zsa_state_create(struct pipe_context
*pipe
,
330 const struct pipe_depth_stencil_alpha_state
*cso
)
332 struct nvc0_zsa_stateobj
*so
= CALLOC_STRUCT(nvc0_zsa_stateobj
);
336 SB_IMMED_3D(so
, DEPTH_TEST_ENABLE
, cso
->depth
.enabled
);
337 if (cso
->depth
.enabled
) {
338 SB_IMMED_3D(so
, DEPTH_WRITE_ENABLE
, cso
->depth
.writemask
);
339 SB_BEGIN_3D(so
, DEPTH_TEST_FUNC
, 1);
340 SB_DATA (so
, nvgl_comparison_op(cso
->depth
.func
));
343 SB_IMMED_3D(so
, DEPTH_BOUNDS_EN
, cso
->depth
.bounds_test
);
344 if (cso
->depth
.bounds_test
) {
345 SB_BEGIN_3D(so
, DEPTH_BOUNDS(0), 2);
346 SB_DATA (so
, fui(cso
->depth
.bounds_min
));
347 SB_DATA (so
, fui(cso
->depth
.bounds_max
));
350 if (cso
->stencil
[0].enabled
) {
351 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 5);
353 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].fail_op
));
354 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zfail_op
));
355 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zpass_op
));
356 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[0].func
));
357 SB_BEGIN_3D(so
, STENCIL_FRONT_FUNC_MASK
, 2);
358 SB_DATA (so
, cso
->stencil
[0].valuemask
);
359 SB_DATA (so
, cso
->stencil
[0].writemask
);
361 SB_IMMED_3D(so
, STENCIL_ENABLE
, 0);
364 if (cso
->stencil
[1].enabled
) {
365 assert(cso
->stencil
[0].enabled
);
366 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 5);
368 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].fail_op
));
369 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zfail_op
));
370 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zpass_op
));
371 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[1].func
));
372 SB_BEGIN_3D(so
, STENCIL_BACK_MASK
, 2);
373 SB_DATA (so
, cso
->stencil
[1].writemask
);
374 SB_DATA (so
, cso
->stencil
[1].valuemask
);
376 if (cso
->stencil
[0].enabled
) {
377 SB_IMMED_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 0);
380 SB_IMMED_3D(so
, ALPHA_TEST_ENABLE
, cso
->alpha
.enabled
);
381 if (cso
->alpha
.enabled
) {
382 SB_BEGIN_3D(so
, ALPHA_TEST_REF
, 2);
383 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
384 SB_DATA (so
, nvgl_comparison_op(cso
->alpha
.func
));
387 assert(so
->size
<= ARRAY_SIZE(so
->state
));
392 nvc0_zsa_state_bind(struct pipe_context
*pipe
, void *hwcso
)
394 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
397 nvc0
->dirty
|= NVC0_NEW_ZSA
;
401 nvc0_zsa_state_delete(struct pipe_context
*pipe
, void *hwcso
)
/* ====================== SAMPLERS AND TEXTURES ============================ */

#define NV50_TSC_WRAP_CASE(n) \
   case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
413 nvc0_sampler_state_delete(struct pipe_context
*pipe
, void *hwcso
)
417 for (s
= 0; s
< 5; ++s
)
418 for (i
= 0; i
< nvc0_context(pipe
)->num_samplers
[s
]; ++i
)
419 if (nvc0_context(pipe
)->samplers
[s
][i
] == hwcso
)
420 nvc0_context(pipe
)->samplers
[s
][i
] = NULL
;
422 nvc0_screen_tsc_free(nvc0_context(pipe
)->screen
, nv50_tsc_entry(hwcso
));
428 nvc0_stage_sampler_states_bind(struct nvc0_context
*nvc0
, int s
,
429 unsigned nr
, void **hwcso
)
433 for (i
= 0; i
< nr
; ++i
) {
434 struct nv50_tsc_entry
*old
= nvc0
->samplers
[s
][i
];
438 nvc0
->samplers_dirty
[s
] |= 1 << i
;
440 nvc0
->samplers
[s
][i
] = nv50_tsc_entry(hwcso
[i
]);
442 nvc0_screen_tsc_unlock(nvc0
->screen
, old
);
444 for (; i
< nvc0
->num_samplers
[s
]; ++i
) {
445 if (nvc0
->samplers
[s
][i
]) {
446 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
447 nvc0
->samplers
[s
][i
] = NULL
;
451 nvc0
->num_samplers
[s
] = nr
;
453 nvc0
->dirty
|= NVC0_NEW_SAMPLERS
;
457 nvc0_stage_sampler_states_bind_range(struct nvc0_context
*nvc0
,
459 unsigned start
, unsigned nr
, void **cso
)
461 const unsigned end
= start
+ nr
;
466 for (i
= start
; i
< end
; ++i
) {
467 const unsigned p
= i
- start
;
470 if (cso
[p
] == nvc0
->samplers
[s
][i
])
472 nvc0
->samplers_dirty
[s
] |= 1 << i
;
474 if (nvc0
->samplers
[s
][i
])
475 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
476 nvc0
->samplers
[s
][i
] = cso
[p
];
479 for (i
= start
; i
< end
; ++i
) {
480 if (nvc0
->samplers
[s
][i
]) {
481 nvc0_screen_tsc_unlock(nvc0
->screen
, nvc0
->samplers
[s
][i
]);
482 nvc0
->samplers
[s
][i
] = NULL
;
483 nvc0
->samplers_dirty
[s
] |= 1 << i
;
488 if (nvc0
->num_samplers
[s
] <= end
) {
489 if (last_valid
< 0) {
490 for (i
= start
; i
&& !nvc0
->samplers
[s
][i
- 1]; --i
);
491 nvc0
->num_samplers
[s
] = i
;
493 nvc0
->num_samplers
[s
] = last_valid
+ 1;
499 nvc0_bind_sampler_states(struct pipe_context
*pipe
, unsigned shader
,
500 unsigned start
, unsigned nr
, void **s
)
503 case PIPE_SHADER_VERTEX
:
505 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 0, nr
, s
);
507 case PIPE_SHADER_TESS_CTRL
:
509 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 1, nr
, s
);
511 case PIPE_SHADER_TESS_EVAL
:
513 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 2, nr
, s
);
515 case PIPE_SHADER_GEOMETRY
:
517 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 3, nr
, s
);
519 case PIPE_SHADER_FRAGMENT
:
521 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), 4, nr
, s
);
523 case PIPE_SHADER_COMPUTE
:
524 nvc0_stage_sampler_states_bind_range(nvc0_context(pipe
), 5,
526 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SAMPLERS
;
532 /* NOTE: only called when not referenced anywhere, won't be bound */
534 nvc0_sampler_view_destroy(struct pipe_context
*pipe
,
535 struct pipe_sampler_view
*view
)
537 pipe_resource_reference(&view
->texture
, NULL
);
539 nvc0_screen_tic_free(nvc0_context(pipe
)->screen
, nv50_tic_entry(view
));
541 FREE(nv50_tic_entry(view
));
545 nvc0_stage_set_sampler_views(struct nvc0_context
*nvc0
, int s
,
547 struct pipe_sampler_view
**views
)
551 for (i
= 0; i
< nr
; ++i
) {
552 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
554 if (views
[i
] == nvc0
->textures
[s
][i
])
556 nvc0
->textures_dirty
[s
] |= 1 << i
;
558 if (views
[i
] && views
[i
]->texture
) {
559 struct pipe_resource
*res
= views
[i
]->texture
;
560 if (res
->target
== PIPE_BUFFER
&&
561 (res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
))
562 nvc0
->textures_coherent
[s
] |= 1 << i
;
564 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
566 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
570 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
571 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
574 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], views
[i
]);
577 for (i
= nr
; i
< nvc0
->num_textures
[s
]; ++i
) {
578 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
580 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_TEX(s
, i
));
581 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
582 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
586 nvc0
->num_textures
[s
] = nr
;
588 nvc0
->dirty
|= NVC0_NEW_TEXTURES
;
592 nvc0_stage_set_sampler_views_range(struct nvc0_context
*nvc0
, const unsigned s
,
593 unsigned start
, unsigned nr
,
594 struct pipe_sampler_view
**views
)
596 struct nouveau_bufctx
*bctx
= (s
== 5) ? nvc0
->bufctx_cp
: nvc0
->bufctx_3d
;
597 const unsigned end
= start
+ nr
;
598 const unsigned bin
= (s
== 5) ? NVC0_BIND_CP_TEX(0) : NVC0_BIND_TEX(s
, 0);
603 for (i
= start
; i
< end
; ++i
) {
604 const unsigned p
= i
- start
;
607 if (views
[p
] == nvc0
->textures
[s
][i
])
609 nvc0
->textures_dirty
[s
] |= 1 << i
;
611 if (views
[p
] && views
[p
]->texture
) {
612 struct pipe_resource
*res
= views
[p
]->texture
;
613 if (res
->target
== PIPE_BUFFER
&&
614 (res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
))
615 nvc0
->textures_coherent
[s
] |= 1 << i
;
617 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
619 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
622 if (nvc0
->textures
[s
][i
]) {
623 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
624 nouveau_bufctx_reset(bctx
, bin
+ i
);
625 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
627 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], views
[p
]);
630 for (i
= start
; i
< end
; ++i
) {
631 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
634 nvc0
->textures_dirty
[s
] |= 1 << i
;
636 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
637 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
638 nouveau_bufctx_reset(bctx
, bin
+ i
);
642 if (nvc0
->num_textures
[s
] <= end
) {
643 if (last_valid
< 0) {
644 for (i
= start
; i
&& !nvc0
->textures
[s
][i
- 1]; --i
);
645 nvc0
->num_textures
[s
] = i
;
647 nvc0
->num_textures
[s
] = last_valid
+ 1;
653 nvc0_set_sampler_views(struct pipe_context
*pipe
, unsigned shader
,
654 unsigned start
, unsigned nr
,
655 struct pipe_sampler_view
**views
)
659 case PIPE_SHADER_VERTEX
:
660 nvc0_stage_set_sampler_views(nvc0_context(pipe
), 0, nr
, views
);
662 case PIPE_SHADER_TESS_CTRL
:
663 nvc0_stage_set_sampler_views(nvc0_context(pipe
), 1, nr
, views
);
665 case PIPE_SHADER_TESS_EVAL
:
666 nvc0_stage_set_sampler_views(nvc0_context(pipe
), 2, nr
, views
);
668 case PIPE_SHADER_GEOMETRY
:
669 nvc0_stage_set_sampler_views(nvc0_context(pipe
), 3, nr
, views
);
671 case PIPE_SHADER_FRAGMENT
:
672 nvc0_stage_set_sampler_views(nvc0_context(pipe
), 4, nr
, views
);
674 case PIPE_SHADER_COMPUTE
:
675 nvc0_stage_set_sampler_views_range(nvc0_context(pipe
), 5,
677 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_TEXTURES
;
/* ============================= SHADERS ================================== */
689 nvc0_sp_state_create(struct pipe_context
*pipe
,
690 const struct pipe_shader_state
*cso
, unsigned type
)
692 struct nvc0_program
*prog
;
694 prog
= CALLOC_STRUCT(nvc0_program
);
701 prog
->pipe
.tokens
= tgsi_dup_tokens(cso
->tokens
);
703 if (cso
->stream_output
.num_outputs
)
704 prog
->pipe
.stream_output
= cso
->stream_output
;
706 prog
->translated
= nvc0_program_translate(
707 prog
, nvc0_context(pipe
)->screen
->base
.device
->chipset
,
708 &nouveau_context(pipe
)->debug
);
714 nvc0_sp_state_delete(struct pipe_context
*pipe
, void *hwcso
)
716 struct nvc0_program
*prog
= (struct nvc0_program
*)hwcso
;
718 nvc0_program_destroy(nvc0_context(pipe
), prog
);
720 FREE((void *)prog
->pipe
.tokens
);
725 nvc0_vp_state_create(struct pipe_context
*pipe
,
726 const struct pipe_shader_state
*cso
)
728 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_VERTEX
);
732 nvc0_vp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
734 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
736 nvc0
->vertprog
= hwcso
;
737 nvc0
->dirty
|= NVC0_NEW_VERTPROG
;
741 nvc0_fp_state_create(struct pipe_context
*pipe
,
742 const struct pipe_shader_state
*cso
)
744 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_FRAGMENT
);
748 nvc0_fp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
750 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
752 nvc0
->fragprog
= hwcso
;
753 nvc0
->dirty
|= NVC0_NEW_FRAGPROG
;
757 nvc0_gp_state_create(struct pipe_context
*pipe
,
758 const struct pipe_shader_state
*cso
)
760 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_GEOMETRY
);
764 nvc0_gp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
766 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
768 nvc0
->gmtyprog
= hwcso
;
769 nvc0
->dirty
|= NVC0_NEW_GMTYPROG
;
773 nvc0_tcp_state_create(struct pipe_context
*pipe
,
774 const struct pipe_shader_state
*cso
)
776 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_TESS_CTRL
);
780 nvc0_tcp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
782 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
784 nvc0
->tctlprog
= hwcso
;
785 nvc0
->dirty
|= NVC0_NEW_TCTLPROG
;
789 nvc0_tep_state_create(struct pipe_context
*pipe
,
790 const struct pipe_shader_state
*cso
)
792 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_TESS_EVAL
);
796 nvc0_tep_state_bind(struct pipe_context
*pipe
, void *hwcso
)
798 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
800 nvc0
->tevlprog
= hwcso
;
801 nvc0
->dirty
|= NVC0_NEW_TEVLPROG
;
805 nvc0_cp_state_create(struct pipe_context
*pipe
,
806 const struct pipe_compute_state
*cso
)
808 struct nvc0_program
*prog
;
810 prog
= CALLOC_STRUCT(nvc0_program
);
813 prog
->type
= PIPE_SHADER_COMPUTE
;
815 prog
->cp
.smem_size
= cso
->req_local_mem
;
816 prog
->cp
.lmem_size
= cso
->req_private_mem
;
817 prog
->parm_size
= cso
->req_input_mem
;
819 prog
->pipe
.tokens
= tgsi_dup_tokens((const struct tgsi_token
*)cso
->prog
);
825 nvc0_cp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
827 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
829 nvc0
->compprog
= hwcso
;
830 nvc0
->dirty_cp
|= NVC0_NEW_CP_PROGRAM
;
834 nvc0_set_constant_buffer(struct pipe_context
*pipe
, uint shader
, uint index
,
835 struct pipe_constant_buffer
*cb
)
837 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
838 struct pipe_resource
*res
= cb
? cb
->buffer
: NULL
;
839 const unsigned s
= nvc0_shader_stage(shader
);
840 const unsigned i
= index
;
842 if (unlikely(shader
== PIPE_SHADER_COMPUTE
)) {
843 assert(!cb
|| !cb
->user_buffer
);
844 if (nvc0
->constbuf
[s
][i
].u
.buf
)
845 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_CB(i
));
847 nvc0
->dirty_cp
|= NVC0_NEW_CP_CONSTBUF
;
849 if (nvc0
->constbuf
[s
][i
].user
)
850 nvc0
->constbuf
[s
][i
].u
.buf
= NULL
;
852 if (nvc0
->constbuf
[s
][i
].u
.buf
)
853 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_CB(s
, i
));
855 nvc0
->dirty
|= NVC0_NEW_CONSTBUF
;
857 nvc0
->constbuf_dirty
[s
] |= 1 << i
;
859 if (nvc0
->constbuf
[s
][i
].u
.buf
)
860 nv04_resource(nvc0
->constbuf
[s
][i
].u
.buf
)->cb_bindings
[s
] &= ~(1 << i
);
861 pipe_resource_reference(&nvc0
->constbuf
[s
][i
].u
.buf
, res
);
863 nvc0
->constbuf
[s
][i
].user
= (cb
&& cb
->user_buffer
) ? true : false;
864 if (nvc0
->constbuf
[s
][i
].user
) {
865 nvc0
->constbuf
[s
][i
].u
.data
= cb
->user_buffer
;
866 nvc0
->constbuf
[s
][i
].size
= MIN2(cb
->buffer_size
, 0x10000);
867 nvc0
->constbuf_valid
[s
] |= 1 << i
;
868 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
871 nvc0
->constbuf
[s
][i
].offset
= cb
->buffer_offset
;
872 nvc0
->constbuf
[s
][i
].size
= MIN2(align(cb
->buffer_size
, 0x100), 0x10000);
873 nvc0
->constbuf_valid
[s
] |= 1 << i
;
874 if (res
&& res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
875 nvc0
->constbuf_coherent
[s
] |= 1 << i
;
877 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
880 nvc0
->constbuf_valid
[s
] &= ~(1 << i
);
881 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
/* ========================================================================= */
889 nvc0_set_blend_color(struct pipe_context
*pipe
,
890 const struct pipe_blend_color
*bcol
)
892 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
894 nvc0
->blend_colour
= *bcol
;
895 nvc0
->dirty
|= NVC0_NEW_BLEND_COLOUR
;
899 nvc0_set_stencil_ref(struct pipe_context
*pipe
,
900 const struct pipe_stencil_ref
*sr
)
902 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
904 nvc0
->stencil_ref
= *sr
;
905 nvc0
->dirty
|= NVC0_NEW_STENCIL_REF
;
909 nvc0_set_clip_state(struct pipe_context
*pipe
,
910 const struct pipe_clip_state
*clip
)
912 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
914 memcpy(nvc0
->clip
.ucp
, clip
->ucp
, sizeof(clip
->ucp
));
916 nvc0
->dirty
|= NVC0_NEW_CLIP
;
920 nvc0_set_sample_mask(struct pipe_context
*pipe
, unsigned sample_mask
)
922 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
924 nvc0
->sample_mask
= sample_mask
;
925 nvc0
->dirty
|= NVC0_NEW_SAMPLE_MASK
;
929 nvc0_set_min_samples(struct pipe_context
*pipe
, unsigned min_samples
)
931 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
933 if (nvc0
->min_samples
!= min_samples
) {
934 nvc0
->min_samples
= min_samples
;
935 nvc0
->dirty
|= NVC0_NEW_MIN_SAMPLES
;
940 nvc0_set_framebuffer_state(struct pipe_context
*pipe
,
941 const struct pipe_framebuffer_state
*fb
)
943 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
945 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_FB
);
947 util_copy_framebuffer_state(&nvc0
->framebuffer
, fb
);
949 nvc0
->dirty
|= NVC0_NEW_FRAMEBUFFER
;
953 nvc0_set_polygon_stipple(struct pipe_context
*pipe
,
954 const struct pipe_poly_stipple
*stipple
)
956 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
958 nvc0
->stipple
= *stipple
;
959 nvc0
->dirty
|= NVC0_NEW_STIPPLE
;
963 nvc0_set_scissor_states(struct pipe_context
*pipe
,
965 unsigned num_scissors
,
966 const struct pipe_scissor_state
*scissor
)
968 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
971 assert(start_slot
+ num_scissors
<= NVC0_MAX_VIEWPORTS
);
972 for (i
= 0; i
< num_scissors
; i
++) {
973 if (!memcmp(&nvc0
->scissors
[start_slot
+ i
], &scissor
[i
], sizeof(*scissor
)))
975 nvc0
->scissors
[start_slot
+ i
] = scissor
[i
];
976 nvc0
->scissors_dirty
|= 1 << (start_slot
+ i
);
977 nvc0
->dirty
|= NVC0_NEW_SCISSOR
;
982 nvc0_set_viewport_states(struct pipe_context
*pipe
,
984 unsigned num_viewports
,
985 const struct pipe_viewport_state
*vpt
)
987 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
990 assert(start_slot
+ num_viewports
<= NVC0_MAX_VIEWPORTS
);
991 for (i
= 0; i
< num_viewports
; i
++) {
992 if (!memcmp(&nvc0
->viewports
[start_slot
+ i
], &vpt
[i
], sizeof(*vpt
)))
994 nvc0
->viewports
[start_slot
+ i
] = vpt
[i
];
995 nvc0
->viewports_dirty
|= 1 << (start_slot
+ i
);
996 nvc0
->dirty
|= NVC0_NEW_VIEWPORT
;
1002 nvc0_set_tess_state(struct pipe_context
*pipe
,
1003 const float default_tess_outer
[4],
1004 const float default_tess_inner
[2])
1006 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1008 memcpy(nvc0
->default_tess_outer
, default_tess_outer
, 4 * sizeof(float));
1009 memcpy(nvc0
->default_tess_inner
, default_tess_inner
, 2 * sizeof(float));
1010 nvc0
->dirty
|= NVC0_NEW_TESSFACTOR
;
1014 nvc0_set_vertex_buffers(struct pipe_context
*pipe
,
1015 unsigned start_slot
, unsigned count
,
1016 const struct pipe_vertex_buffer
*vb
)
1018 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1021 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_VTX
);
1022 nvc0
->dirty
|= NVC0_NEW_ARRAYS
;
1024 util_set_vertex_buffers_count(nvc0
->vtxbuf
, &nvc0
->num_vtxbufs
, vb
,
1028 nvc0
->vbo_user
&= ~(((1ull << count
) - 1) << start_slot
);
1029 nvc0
->constant_vbos
&= ~(((1ull << count
) - 1) << start_slot
);
1030 nvc0
->vtxbufs_coherent
&= ~(((1ull << count
) - 1) << start_slot
);
1034 for (i
= 0; i
< count
; ++i
) {
1035 unsigned dst_index
= start_slot
+ i
;
1037 if (vb
[i
].user_buffer
) {
1038 nvc0
->vbo_user
|= 1 << dst_index
;
1039 if (!vb
[i
].stride
&& nvc0
->screen
->eng3d
->oclass
< GM107_3D_CLASS
)
1040 nvc0
->constant_vbos
|= 1 << dst_index
;
1042 nvc0
->constant_vbos
&= ~(1 << dst_index
);
1043 nvc0
->vtxbufs_coherent
&= ~(1 << dst_index
);
1045 nvc0
->vbo_user
&= ~(1 << dst_index
);
1046 nvc0
->constant_vbos
&= ~(1 << dst_index
);
1049 vb
[i
].buffer
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
1050 nvc0
->vtxbufs_coherent
|= (1 << dst_index
);
1052 nvc0
->vtxbufs_coherent
&= ~(1 << dst_index
);
1058 nvc0_set_index_buffer(struct pipe_context
*pipe
,
1059 const struct pipe_index_buffer
*ib
)
1061 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1063 if (nvc0
->idxbuf
.buffer
)
1064 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_IDX
);
1067 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, ib
->buffer
);
1068 nvc0
->idxbuf
.index_size
= ib
->index_size
;
1070 nvc0
->idxbuf
.offset
= ib
->offset
;
1071 nvc0
->dirty
|= NVC0_NEW_IDXBUF
;
1073 nvc0
->idxbuf
.user_buffer
= ib
->user_buffer
;
1074 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
1077 nvc0
->dirty
&= ~NVC0_NEW_IDXBUF
;
1078 pipe_resource_reference(&nvc0
->idxbuf
.buffer
, NULL
);
1083 nvc0_vertex_state_bind(struct pipe_context
*pipe
, void *hwcso
)
1085 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1087 nvc0
->vertex
= hwcso
;
1088 nvc0
->dirty
|= NVC0_NEW_VERTEX
;
1091 static struct pipe_stream_output_target
*
1092 nvc0_so_target_create(struct pipe_context
*pipe
,
1093 struct pipe_resource
*res
,
1094 unsigned offset
, unsigned size
)
1096 struct nv04_resource
*buf
= (struct nv04_resource
*)res
;
1097 struct nvc0_so_target
*targ
= MALLOC_STRUCT(nvc0_so_target
);
1101 targ
->pq
= pipe
->create_query(pipe
, NVC0_HW_QUERY_TFB_BUFFER_OFFSET
, 0);
1108 targ
->pipe
.buffer_size
= size
;
1109 targ
->pipe
.buffer_offset
= offset
;
1110 targ
->pipe
.context
= pipe
;
1111 targ
->pipe
.buffer
= NULL
;
1112 pipe_resource_reference(&targ
->pipe
.buffer
, res
);
1113 pipe_reference_init(&targ
->pipe
.reference
, 1);
1115 assert(buf
->base
.target
== PIPE_BUFFER
);
1116 util_range_add(&buf
->valid_buffer_range
, offset
, offset
+ size
);
1122 nvc0_so_target_save_offset(struct pipe_context
*pipe
,
1123 struct pipe_stream_output_target
*ptarg
,
1124 unsigned index
, bool *serialize
)
1126 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
1130 PUSH_SPACE(nvc0_context(pipe
)->base
.pushbuf
, 1);
1131 IMMED_NVC0(nvc0_context(pipe
)->base
.pushbuf
, NVC0_3D(SERIALIZE
), 0);
1133 NOUVEAU_DRV_STAT(nouveau_screen(pipe
->screen
), gpu_serialize_count
, 1);
1136 nvc0_query(targ
->pq
)->index
= index
;
1137 pipe
->end_query(pipe
, targ
->pq
);
1141 nvc0_so_target_destroy(struct pipe_context
*pipe
,
1142 struct pipe_stream_output_target
*ptarg
)
1144 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
1145 pipe
->destroy_query(pipe
, targ
->pq
);
1146 pipe_resource_reference(&targ
->pipe
.buffer
, NULL
);
1151 nvc0_set_transform_feedback_targets(struct pipe_context
*pipe
,
1152 unsigned num_targets
,
1153 struct pipe_stream_output_target
**targets
,
1154 const unsigned *offsets
)
1156 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1158 bool serialize
= true;
1160 assert(num_targets
<= 4);
1162 for (i
= 0; i
< num_targets
; ++i
) {
1163 const bool changed
= nvc0
->tfbbuf
[i
] != targets
[i
];
1164 const bool append
= (offsets
[i
] == ((unsigned)-1));
1165 if (!changed
&& append
)
1167 nvc0
->tfbbuf_dirty
|= 1 << i
;
1169 if (nvc0
->tfbbuf
[i
] && changed
)
1170 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1172 if (targets
[i
] && !append
)
1173 nvc0_so_target(targets
[i
])->clean
= true;
1175 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], targets
[i
]);
1177 for (; i
< nvc0
->num_tfbbufs
; ++i
) {
1178 if (nvc0
->tfbbuf
[i
]) {
1179 nvc0
->tfbbuf_dirty
|= 1 << i
;
1180 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1181 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], NULL
);
1184 nvc0
->num_tfbbufs
= num_targets
;
1186 if (nvc0
->tfbbuf_dirty
)
1187 nvc0
->dirty
|= NVC0_NEW_TFB_TARGETS
;
1191 nvc0_bind_surfaces_range(struct nvc0_context
*nvc0
, const unsigned t
,
1192 unsigned start
, unsigned nr
,
1193 struct pipe_surface
**psurfaces
)
1195 const unsigned end
= start
+ nr
;
1196 const unsigned mask
= ((1 << nr
) - 1) << start
;
1200 for (i
= start
; i
< end
; ++i
) {
1201 const unsigned p
= i
- start
;
1203 nvc0
->surfaces_valid
[t
] |= (1 << i
);
1205 nvc0
->surfaces_valid
[t
] &= ~(1 << i
);
1206 pipe_surface_reference(&nvc0
->surfaces
[t
][i
], psurfaces
[p
]);
1209 for (i
= start
; i
< end
; ++i
)
1210 pipe_surface_reference(&nvc0
->surfaces
[t
][i
], NULL
);
1211 nvc0
->surfaces_valid
[t
] &= ~mask
;
1213 nvc0
->surfaces_dirty
[t
] |= mask
;
1216 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_SUF
);
1218 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_SUF
);
1222 nvc0_set_compute_resources(struct pipe_context
*pipe
,
1223 unsigned start
, unsigned nr
,
1224 struct pipe_surface
**resources
)
1226 nvc0_bind_surfaces_range(nvc0_context(pipe
), 1, start
, nr
, resources
);
1228 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SURFACES
;
/* pipe_context::set_shader_images:
 * NOTE(review): shader image binding is not implemented in this version of
 * the driver — the hook exists only so state trackers can call it.  Confirm
 * against upstream before relying on image support.
 */
static void
nvc0_set_shader_images(struct pipe_context *pipe, unsigned shader,
                       unsigned start_slot, unsigned count,
                       struct pipe_image_view *views)
{
}
1239 nvc0_bind_buffers_range(struct nvc0_context
*nvc0
, const unsigned t
,
1240 unsigned start
, unsigned nr
,
1241 struct pipe_shader_buffer
*pbuffers
)
1243 const unsigned end
= start
+ nr
;
1244 const unsigned mask
= ((1 << nr
) - 1) << start
;
1250 for (i
= start
; i
< end
; ++i
) {
1251 const unsigned p
= i
- start
;
1252 if (pbuffers
[p
].buffer
)
1253 nvc0
->buffers_valid
[t
] |= (1 << i
);
1255 nvc0
->buffers_valid
[t
] &= ~(1 << i
);
1256 nvc0
->buffers
[t
][i
].buffer_offset
= pbuffers
[p
].buffer_offset
;
1257 nvc0
->buffers
[t
][i
].buffer_size
= pbuffers
[p
].buffer_size
;
1258 pipe_resource_reference(&nvc0
->buffers
[t
][i
].buffer
, pbuffers
[p
].buffer
);
1261 for (i
= start
; i
< end
; ++i
)
1262 pipe_resource_reference(&nvc0
->buffers
[t
][i
].buffer
, NULL
);
1263 nvc0
->buffers_valid
[t
] &= ~mask
;
1265 nvc0
->buffers_dirty
[t
] |= mask
;
1267 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_BUF
);
1271 nvc0_set_shader_buffers(struct pipe_context
*pipe
,
1273 unsigned start
, unsigned nr
,
1274 struct pipe_shader_buffer
*buffers
)
1276 const unsigned s
= nvc0_shader_stage(shader
);
1277 nvc0_bind_buffers_range(nvc0_context(pipe
), s
, start
, nr
, buffers
);
1279 nvc0_context(pipe
)->dirty
|= NVC0_NEW_BUFFERS
;
1283 nvc0_set_global_handle(uint32_t *phandle
, struct pipe_resource
*res
)
1285 struct nv04_resource
*buf
= nv04_resource(res
);
1287 uint64_t limit
= (buf
->address
+ buf
->base
.width0
) - 1;
1288 if (limit
< (1ULL << 32)) {
1289 *phandle
= (uint32_t)buf
->address
;
1291 NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
1292 "resource not contained within 32-bit address space !\n");
1301 nvc0_set_global_bindings(struct pipe_context
*pipe
,
1302 unsigned start
, unsigned nr
,
1303 struct pipe_resource
**resources
,
1306 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1307 struct pipe_resource
**ptr
;
1309 const unsigned end
= start
+ nr
;
1311 if (nvc0
->global_residents
.size
<= (end
* sizeof(struct pipe_resource
*))) {
1312 const unsigned old_size
= nvc0
->global_residents
.size
;
1313 const unsigned req_size
= end
* sizeof(struct pipe_resource
*);
1314 util_dynarray_resize(&nvc0
->global_residents
, req_size
);
1315 memset((uint8_t *)nvc0
->global_residents
.data
+ old_size
, 0,
1316 req_size
- old_size
);
1320 ptr
= util_dynarray_element(
1321 &nvc0
->global_residents
, struct pipe_resource
*, start
);
1322 for (i
= 0; i
< nr
; ++i
) {
1323 pipe_resource_reference(&ptr
[i
], resources
[i
]);
1324 nvc0_set_global_handle(handles
[i
], resources
[i
]);
1327 ptr
= util_dynarray_element(
1328 &nvc0
->global_residents
, struct pipe_resource
*, start
);
1329 for (i
= 0; i
< nr
; ++i
)
1330 pipe_resource_reference(&ptr
[i
], NULL
);
1333 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_GLOBAL
);
1335 nvc0
->dirty_cp
= NVC0_NEW_CP_GLOBALS
;
1339 nvc0_init_state_functions(struct nvc0_context
*nvc0
)
1341 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
1343 pipe
->create_blend_state
= nvc0_blend_state_create
;
1344 pipe
->bind_blend_state
= nvc0_blend_state_bind
;
1345 pipe
->delete_blend_state
= nvc0_blend_state_delete
;
1347 pipe
->create_rasterizer_state
= nvc0_rasterizer_state_create
;
1348 pipe
->bind_rasterizer_state
= nvc0_rasterizer_state_bind
;
1349 pipe
->delete_rasterizer_state
= nvc0_rasterizer_state_delete
;
1351 pipe
->create_depth_stencil_alpha_state
= nvc0_zsa_state_create
;
1352 pipe
->bind_depth_stencil_alpha_state
= nvc0_zsa_state_bind
;
1353 pipe
->delete_depth_stencil_alpha_state
= nvc0_zsa_state_delete
;
1355 pipe
->create_sampler_state
= nv50_sampler_state_create
;
1356 pipe
->delete_sampler_state
= nvc0_sampler_state_delete
;
1357 pipe
->bind_sampler_states
= nvc0_bind_sampler_states
;
1359 pipe
->create_sampler_view
= nvc0_create_sampler_view
;
1360 pipe
->sampler_view_destroy
= nvc0_sampler_view_destroy
;
1361 pipe
->set_sampler_views
= nvc0_set_sampler_views
;
1363 pipe
->create_vs_state
= nvc0_vp_state_create
;
1364 pipe
->create_fs_state
= nvc0_fp_state_create
;
1365 pipe
->create_gs_state
= nvc0_gp_state_create
;
1366 pipe
->create_tcs_state
= nvc0_tcp_state_create
;
1367 pipe
->create_tes_state
= nvc0_tep_state_create
;
1368 pipe
->bind_vs_state
= nvc0_vp_state_bind
;
1369 pipe
->bind_fs_state
= nvc0_fp_state_bind
;
1370 pipe
->bind_gs_state
= nvc0_gp_state_bind
;
1371 pipe
->bind_tcs_state
= nvc0_tcp_state_bind
;
1372 pipe
->bind_tes_state
= nvc0_tep_state_bind
;
1373 pipe
->delete_vs_state
= nvc0_sp_state_delete
;
1374 pipe
->delete_fs_state
= nvc0_sp_state_delete
;
1375 pipe
->delete_gs_state
= nvc0_sp_state_delete
;
1376 pipe
->delete_tcs_state
= nvc0_sp_state_delete
;
1377 pipe
->delete_tes_state
= nvc0_sp_state_delete
;
1379 pipe
->create_compute_state
= nvc0_cp_state_create
;
1380 pipe
->bind_compute_state
= nvc0_cp_state_bind
;
1381 pipe
->delete_compute_state
= nvc0_sp_state_delete
;
1383 pipe
->set_blend_color
= nvc0_set_blend_color
;
1384 pipe
->set_stencil_ref
= nvc0_set_stencil_ref
;
1385 pipe
->set_clip_state
= nvc0_set_clip_state
;
1386 pipe
->set_sample_mask
= nvc0_set_sample_mask
;
1387 pipe
->set_min_samples
= nvc0_set_min_samples
;
1388 pipe
->set_constant_buffer
= nvc0_set_constant_buffer
;
1389 pipe
->set_framebuffer_state
= nvc0_set_framebuffer_state
;
1390 pipe
->set_polygon_stipple
= nvc0_set_polygon_stipple
;
1391 pipe
->set_scissor_states
= nvc0_set_scissor_states
;
1392 pipe
->set_viewport_states
= nvc0_set_viewport_states
;
1393 pipe
->set_tess_state
= nvc0_set_tess_state
;
1395 pipe
->create_vertex_elements_state
= nvc0_vertex_state_create
;
1396 pipe
->delete_vertex_elements_state
= nvc0_vertex_state_delete
;
1397 pipe
->bind_vertex_elements_state
= nvc0_vertex_state_bind
;
1399 pipe
->set_vertex_buffers
= nvc0_set_vertex_buffers
;
1400 pipe
->set_index_buffer
= nvc0_set_index_buffer
;
1402 pipe
->create_stream_output_target
= nvc0_so_target_create
;
1403 pipe
->stream_output_target_destroy
= nvc0_so_target_destroy
;
1404 pipe
->set_stream_output_targets
= nvc0_set_transform_feedback_targets
;
1406 pipe
->set_global_binding
= nvc0_set_global_bindings
;
1407 pipe
->set_compute_resources
= nvc0_set_compute_resources
;
1408 pipe
->set_shader_images
= nvc0_set_shader_images
;
1409 pipe
->set_shader_buffers
= nvc0_set_shader_buffers
;
1411 nvc0
->sample_mask
= ~0;
1412 nvc0
->min_samples
= 1;
1413 nvc0
->default_tess_outer
[0] =
1414 nvc0
->default_tess_outer
[1] =
1415 nvc0
->default_tess_outer
[2] =
1416 nvc0
->default_tess_outer
[3] = 1.0;
1417 nvc0
->default_tess_inner
[0] =
1418 nvc0
->default_tess_inner
[1] = 1.0;