/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "pipe/p_defines.h"
24 #include "util/u_framebuffer.h"
25 #include "util/u_helpers.h"
26 #include "util/u_inlines.h"
27 #include "util/u_transfer.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_serialize.h"
33 #include "nvc0/nvc0_stateobj.h"
34 #include "nvc0/nvc0_context.h"
35 #include "nvc0/nvc0_query_hw.h"
37 #include "nvc0/nvc0_3d.xml.h"
39 #include "nouveau_gldefs.h"
41 static inline uint32_t
42 nvc0_colormask(unsigned mask
)
46 if (mask
& PIPE_MASK_R
)
48 if (mask
& PIPE_MASK_G
)
50 if (mask
& PIPE_MASK_B
)
52 if (mask
& PIPE_MASK_A
)
58 #define NVC0_BLEND_FACTOR_CASE(a, b) \
59 case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b
61 static inline uint32_t
62 nvc0_blend_fac(unsigned factor
)
65 NVC0_BLEND_FACTOR_CASE(ONE
, ONE
);
66 NVC0_BLEND_FACTOR_CASE(SRC_COLOR
, SRC_COLOR
);
67 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA
, SRC_ALPHA
);
68 NVC0_BLEND_FACTOR_CASE(DST_ALPHA
, DST_ALPHA
);
69 NVC0_BLEND_FACTOR_CASE(DST_COLOR
, DST_COLOR
);
70 NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE
, SRC_ALPHA_SATURATE
);
71 NVC0_BLEND_FACTOR_CASE(CONST_COLOR
, CONSTANT_COLOR
);
72 NVC0_BLEND_FACTOR_CASE(CONST_ALPHA
, CONSTANT_ALPHA
);
73 NVC0_BLEND_FACTOR_CASE(SRC1_COLOR
, SRC1_COLOR
);
74 NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA
, SRC1_ALPHA
);
75 NVC0_BLEND_FACTOR_CASE(ZERO
, ZERO
);
76 NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR
, ONE_MINUS_SRC_COLOR
);
77 NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA
, ONE_MINUS_SRC_ALPHA
);
78 NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA
, ONE_MINUS_DST_ALPHA
);
79 NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR
, ONE_MINUS_DST_COLOR
);
80 NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR
, ONE_MINUS_CONSTANT_COLOR
);
81 NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA
, ONE_MINUS_CONSTANT_ALPHA
);
82 NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR
, ONE_MINUS_SRC1_COLOR
);
83 NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA
, ONE_MINUS_SRC1_ALPHA
);
85 return NV50_BLEND_FACTOR_ZERO
;
90 nvc0_blend_state_create(struct pipe_context
*pipe
,
91 const struct pipe_blend_state
*cso
)
93 struct nvc0_blend_stateobj
*so
= CALLOC_STRUCT(nvc0_blend_stateobj
);
95 int r
; /* reference */
98 bool indep_masks
= false;
99 bool indep_funcs
= false;
103 /* check which states actually have differing values */
104 if (cso
->independent_blend_enable
) {
105 for (r
= 0; r
< 8 && !cso
->rt
[r
].blend_enable
; ++r
);
107 for (i
= r
+ 1; i
< 8; ++i
) {
108 if (!cso
->rt
[i
].blend_enable
)
111 if (cso
->rt
[i
].rgb_func
!= cso
->rt
[r
].rgb_func
||
112 cso
->rt
[i
].rgb_src_factor
!= cso
->rt
[r
].rgb_src_factor
||
113 cso
->rt
[i
].rgb_dst_factor
!= cso
->rt
[r
].rgb_dst_factor
||
114 cso
->rt
[i
].alpha_func
!= cso
->rt
[r
].alpha_func
||
115 cso
->rt
[i
].alpha_src_factor
!= cso
->rt
[r
].alpha_src_factor
||
116 cso
->rt
[i
].alpha_dst_factor
!= cso
->rt
[r
].alpha_dst_factor
) {
122 blend_en
|= (cso
->rt
[i
].blend_enable
? 1 : 0) << i
;
124 for (i
= 1; i
< 8; ++i
) {
125 if (cso
->rt
[i
].colormask
!= cso
->rt
[0].colormask
) {
132 if (cso
->rt
[0].blend_enable
)
136 if (cso
->logicop_enable
) {
137 SB_BEGIN_3D(so
, LOGIC_OP_ENABLE
, 2);
139 SB_DATA (so
, nvgl_logicop_func(cso
->logicop_func
));
141 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, 0);
143 SB_IMMED_3D(so
, LOGIC_OP_ENABLE
, 0);
145 SB_IMMED_3D(so
, BLEND_INDEPENDENT
, indep_funcs
);
146 SB_IMMED_3D(so
, MACRO_BLEND_ENABLES
, blend_en
);
148 for (i
= 0; i
< 8; ++i
) {
149 if (cso
->rt
[i
].blend_enable
) {
150 SB_BEGIN_3D(so
, IBLEND_EQUATION_RGB(i
), 6);
151 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].rgb_func
));
152 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_src_factor
));
153 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].rgb_dst_factor
));
154 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[i
].alpha_func
));
155 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_src_factor
));
156 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[i
].alpha_dst_factor
));
161 SB_BEGIN_3D(so
, BLEND_EQUATION_RGB
, 5);
162 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].rgb_func
));
163 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_src_factor
));
164 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].rgb_dst_factor
));
165 SB_DATA (so
, nvgl_blend_eqn(cso
->rt
[r
].alpha_func
));
166 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_src_factor
));
167 SB_BEGIN_3D(so
, BLEND_FUNC_DST_ALPHA
, 1);
168 SB_DATA (so
, nvc0_blend_fac(cso
->rt
[r
].alpha_dst_factor
));
171 SB_IMMED_3D(so
, COLOR_MASK_COMMON
, !indep_masks
);
173 SB_BEGIN_3D(so
, COLOR_MASK(0), 8);
174 for (i
= 0; i
< 8; ++i
)
175 SB_DATA(so
, nvc0_colormask(cso
->rt
[i
].colormask
));
177 SB_BEGIN_3D(so
, COLOR_MASK(0), 1);
178 SB_DATA (so
, nvc0_colormask(cso
->rt
[0].colormask
));
183 if (cso
->alpha_to_coverage
)
184 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE
;
185 if (cso
->alpha_to_one
)
186 ms
|= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE
;
188 SB_BEGIN_3D(so
, MULTISAMPLE_CTRL
, 1);
191 assert(so
->size
<= ARRAY_SIZE(so
->state
));
196 nvc0_blend_state_bind(struct pipe_context
*pipe
, void *hwcso
)
198 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
201 nvc0
->dirty_3d
|= NVC0_NEW_3D_BLEND
;
205 nvc0_blend_state_delete(struct pipe_context
*pipe
, void *hwcso
)
210 /* NOTE: ignoring line_last_pixel */
212 nvc0_rasterizer_state_create(struct pipe_context
*pipe
,
213 const struct pipe_rasterizer_state
*cso
)
215 struct nvc0_rasterizer_stateobj
*so
;
216 uint16_t class_3d
= nouveau_screen(pipe
->screen
)->class_3d
;
219 so
= CALLOC_STRUCT(nvc0_rasterizer_stateobj
);
224 /* Scissor enables are handled in scissor state, we will not want to
225 * always emit 16 commands, one for each scissor rectangle, here.
228 SB_IMMED_3D(so
, PROVOKING_VERTEX_LAST
, !cso
->flatshade_first
);
229 SB_IMMED_3D(so
, VERTEX_TWO_SIDE_ENABLE
, cso
->light_twoside
);
231 SB_IMMED_3D(so
, VERT_COLOR_CLAMP_EN
, cso
->clamp_vertex_color
);
232 SB_BEGIN_3D(so
, FRAG_COLOR_CLAMP_EN
, 1);
233 SB_DATA (so
, cso
->clamp_fragment_color
? 0x11111111 : 0x00000000);
235 SB_IMMED_3D(so
, MULTISAMPLE_ENABLE
, cso
->multisample
);
237 SB_IMMED_3D(so
, LINE_SMOOTH_ENABLE
, cso
->line_smooth
);
238 if (cso
->line_smooth
|| cso
->multisample
)
239 SB_BEGIN_3D(so
, LINE_WIDTH_SMOOTH
, 1);
241 SB_BEGIN_3D(so
, LINE_WIDTH_ALIASED
, 1);
242 SB_DATA (so
, fui(cso
->line_width
));
244 SB_IMMED_3D(so
, LINE_STIPPLE_ENABLE
, cso
->line_stipple_enable
);
245 if (cso
->line_stipple_enable
) {
246 SB_BEGIN_3D(so
, LINE_STIPPLE_PATTERN
, 1);
247 SB_DATA (so
, (cso
->line_stipple_pattern
<< 8) |
248 cso
->line_stipple_factor
);
252 SB_IMMED_3D(so
, VP_POINT_SIZE
, cso
->point_size_per_vertex
);
253 if (!cso
->point_size_per_vertex
) {
254 SB_BEGIN_3D(so
, POINT_SIZE
, 1);
255 SB_DATA (so
, fui(cso
->point_size
));
258 reg
= (cso
->sprite_coord_mode
== PIPE_SPRITE_COORD_UPPER_LEFT
) ?
259 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT
:
260 NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT
;
262 SB_BEGIN_3D(so
, POINT_COORD_REPLACE
, 1);
263 SB_DATA (so
, ((cso
->sprite_coord_enable
& 0xff) << 3) | reg
);
264 SB_IMMED_3D(so
, POINT_SPRITE_ENABLE
, cso
->point_quad_rasterization
);
265 SB_IMMED_3D(so
, POINT_SMOOTH_ENABLE
, cso
->point_smooth
);
267 if (class_3d
>= GM200_3D_CLASS
) {
268 SB_IMMED_3D(so
, FILL_RECTANGLE
,
269 cso
->fill_front
== PIPE_POLYGON_MODE_FILL_RECTANGLE
?
270 NVC0_3D_FILL_RECTANGLE_ENABLE
: 0);
273 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_FRONT
, 1);
274 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_front
));
275 SB_BEGIN_3D(so
, MACRO_POLYGON_MODE_BACK
, 1);
276 SB_DATA (so
, nvgl_polygon_mode(cso
->fill_back
));
277 SB_IMMED_3D(so
, POLYGON_SMOOTH_ENABLE
, cso
->poly_smooth
);
279 SB_BEGIN_3D(so
, CULL_FACE_ENABLE
, 3);
280 SB_DATA (so
, cso
->cull_face
!= PIPE_FACE_NONE
);
281 SB_DATA (so
, cso
->front_ccw
? NVC0_3D_FRONT_FACE_CCW
:
282 NVC0_3D_FRONT_FACE_CW
);
283 switch (cso
->cull_face
) {
284 case PIPE_FACE_FRONT_AND_BACK
:
285 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT_AND_BACK
);
287 case PIPE_FACE_FRONT
:
288 SB_DATA(so
, NVC0_3D_CULL_FACE_FRONT
);
292 SB_DATA(so
, NVC0_3D_CULL_FACE_BACK
);
296 SB_IMMED_3D(so
, POLYGON_STIPPLE_ENABLE
, cso
->poly_stipple_enable
);
297 SB_BEGIN_3D(so
, POLYGON_OFFSET_POINT_ENABLE
, 3);
298 SB_DATA (so
, cso
->offset_point
);
299 SB_DATA (so
, cso
->offset_line
);
300 SB_DATA (so
, cso
->offset_tri
);
302 if (cso
->offset_point
|| cso
->offset_line
|| cso
->offset_tri
) {
303 SB_BEGIN_3D(so
, POLYGON_OFFSET_FACTOR
, 1);
304 SB_DATA (so
, fui(cso
->offset_scale
));
305 if (!cso
->offset_units_unscaled
) {
306 SB_BEGIN_3D(so
, POLYGON_OFFSET_UNITS
, 1);
307 SB_DATA (so
, fui(cso
->offset_units
* 2.0f
));
309 SB_BEGIN_3D(so
, POLYGON_OFFSET_CLAMP
, 1);
310 SB_DATA (so
, fui(cso
->offset_clamp
));
313 if (cso
->depth_clip_near
)
314 reg
= NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
;
317 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1
|
318 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR
|
319 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR
|
320 NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2
;
322 SB_BEGIN_3D(so
, VIEW_VOLUME_CLIP_CTRL
, 1);
325 SB_IMMED_3D(so
, DEPTH_CLIP_NEGATIVE_Z
, cso
->clip_halfz
);
327 SB_IMMED_3D(so
, PIXEL_CENTER_INTEGER
, !cso
->half_pixel_center
);
329 if (class_3d
>= GM200_3D_CLASS
) {
330 if (cso
->conservative_raster_mode
!= PIPE_CONSERVATIVE_RASTER_OFF
) {
331 bool post_snap
= cso
->conservative_raster_mode
==
332 PIPE_CONSERVATIVE_RASTER_POST_SNAP
;
333 uint32_t state
= cso
->subpixel_precision_x
;
334 state
|= cso
->subpixel_precision_y
<< 4;
335 state
|= (uint32_t)(cso
->conservative_raster_dilate
* 4) << 8;
336 state
|= (post_snap
|| class_3d
< GP100_3D_CLASS
) ? 1 << 10 : 0;
337 SB_IMMED_3D(so
, MACRO_CONSERVATIVE_RASTER_STATE
, state
);
339 SB_IMMED_3D(so
, CONSERVATIVE_RASTER
, 0);
343 assert(so
->size
<= ARRAY_SIZE(so
->state
));
348 nvc0_rasterizer_state_bind(struct pipe_context
*pipe
, void *hwcso
)
350 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
353 nvc0
->dirty_3d
|= NVC0_NEW_3D_RASTERIZER
;
357 nvc0_rasterizer_state_delete(struct pipe_context
*pipe
, void *hwcso
)
363 nvc0_zsa_state_create(struct pipe_context
*pipe
,
364 const struct pipe_depth_stencil_alpha_state
*cso
)
366 struct nvc0_zsa_stateobj
*so
= CALLOC_STRUCT(nvc0_zsa_stateobj
);
370 SB_IMMED_3D(so
, DEPTH_TEST_ENABLE
, cso
->depth
.enabled
);
371 if (cso
->depth
.enabled
) {
372 SB_IMMED_3D(so
, DEPTH_WRITE_ENABLE
, cso
->depth
.writemask
);
373 SB_BEGIN_3D(so
, DEPTH_TEST_FUNC
, 1);
374 SB_DATA (so
, nvgl_comparison_op(cso
->depth
.func
));
377 SB_IMMED_3D(so
, DEPTH_BOUNDS_EN
, cso
->depth
.bounds_test
);
378 if (cso
->depth
.bounds_test
) {
379 SB_BEGIN_3D(so
, DEPTH_BOUNDS(0), 2);
380 SB_DATA (so
, fui(cso
->depth
.bounds_min
));
381 SB_DATA (so
, fui(cso
->depth
.bounds_max
));
384 if (cso
->stencil
[0].enabled
) {
385 SB_BEGIN_3D(so
, STENCIL_ENABLE
, 5);
387 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].fail_op
));
388 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zfail_op
));
389 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[0].zpass_op
));
390 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[0].func
));
391 SB_BEGIN_3D(so
, STENCIL_FRONT_FUNC_MASK
, 2);
392 SB_DATA (so
, cso
->stencil
[0].valuemask
);
393 SB_DATA (so
, cso
->stencil
[0].writemask
);
395 SB_IMMED_3D(so
, STENCIL_ENABLE
, 0);
398 if (cso
->stencil
[1].enabled
) {
399 assert(cso
->stencil
[0].enabled
);
400 SB_BEGIN_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 5);
402 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].fail_op
));
403 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zfail_op
));
404 SB_DATA (so
, nvgl_stencil_op(cso
->stencil
[1].zpass_op
));
405 SB_DATA (so
, nvgl_comparison_op(cso
->stencil
[1].func
));
406 SB_BEGIN_3D(so
, STENCIL_BACK_MASK
, 2);
407 SB_DATA (so
, cso
->stencil
[1].writemask
);
408 SB_DATA (so
, cso
->stencil
[1].valuemask
);
410 if (cso
->stencil
[0].enabled
) {
411 SB_IMMED_3D(so
, STENCIL_TWO_SIDE_ENABLE
, 0);
414 SB_IMMED_3D(so
, ALPHA_TEST_ENABLE
, cso
->alpha
.enabled
);
415 if (cso
->alpha
.enabled
) {
416 SB_BEGIN_3D(so
, ALPHA_TEST_REF
, 2);
417 SB_DATA (so
, fui(cso
->alpha
.ref_value
));
418 SB_DATA (so
, nvgl_comparison_op(cso
->alpha
.func
));
421 assert(so
->size
<= ARRAY_SIZE(so
->state
));
426 nvc0_zsa_state_bind(struct pipe_context
*pipe
, void *hwcso
)
428 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
431 nvc0
->dirty_3d
|= NVC0_NEW_3D_ZSA
;
435 nvc0_zsa_state_delete(struct pipe_context
*pipe
, void *hwcso
)
440 /* ====================== SAMPLERS AND TEXTURES ================================
443 #define NV50_TSC_WRAP_CASE(n) \
444 case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
447 nvc0_sampler_state_delete(struct pipe_context
*pipe
, void *hwcso
)
451 for (s
= 0; s
< 6; ++s
)
452 for (i
= 0; i
< nvc0_context(pipe
)->num_samplers
[s
]; ++i
)
453 if (nvc0_context(pipe
)->samplers
[s
][i
] == hwcso
)
454 nvc0_context(pipe
)->samplers
[s
][i
] = NULL
;
456 nvc0_screen_tsc_free(nvc0_context(pipe
)->screen
, nv50_tsc_entry(hwcso
));
462 nvc0_stage_sampler_states_bind(struct nvc0_context
*nvc0
,
464 unsigned nr
, void **hwcsos
)
466 unsigned highest_found
= 0;
469 for (i
= 0; i
< nr
; ++i
) {
470 struct nv50_tsc_entry
*hwcso
= hwcsos
? nv50_tsc_entry(hwcsos
[i
]) : NULL
;
471 struct nv50_tsc_entry
*old
= nvc0
->samplers
[s
][i
];
478 nvc0
->samplers_dirty
[s
] |= 1 << i
;
480 nvc0
->samplers
[s
][i
] = hwcso
;
482 nvc0_screen_tsc_unlock(nvc0
->screen
, old
);
484 if (nr
>= nvc0
->num_samplers
[s
])
485 nvc0
->num_samplers
[s
] = highest_found
+ 1;
489 nvc0_bind_sampler_states(struct pipe_context
*pipe
,
490 enum pipe_shader_type shader
,
491 unsigned start
, unsigned nr
, void **samplers
)
493 const unsigned s
= nvc0_shader_stage(shader
);
496 nvc0_stage_sampler_states_bind(nvc0_context(pipe
), s
, nr
, samplers
);
499 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SAMPLERS
;
501 nvc0_context(pipe
)->dirty_3d
|= NVC0_NEW_3D_SAMPLERS
;
505 /* NOTE: only called when not referenced anywhere, won't be bound */
507 nvc0_sampler_view_destroy(struct pipe_context
*pipe
,
508 struct pipe_sampler_view
*view
)
510 pipe_resource_reference(&view
->texture
, NULL
);
512 nvc0_screen_tic_free(nvc0_context(pipe
)->screen
, nv50_tic_entry(view
));
514 FREE(nv50_tic_entry(view
));
518 nvc0_stage_set_sampler_views(struct nvc0_context
*nvc0
, int s
,
520 struct pipe_sampler_view
**views
)
524 for (i
= 0; i
< nr
; ++i
) {
525 struct pipe_sampler_view
*view
= views
? views
[i
] : NULL
;
526 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
528 if (view
== nvc0
->textures
[s
][i
])
530 nvc0
->textures_dirty
[s
] |= 1 << i
;
532 if (view
&& view
->texture
) {
533 struct pipe_resource
*res
= view
->texture
;
534 if (res
->target
== PIPE_BUFFER
&&
535 (res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
))
536 nvc0
->textures_coherent
[s
] |= 1 << i
;
538 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
540 nvc0
->textures_coherent
[s
] &= ~(1 << i
);
545 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_TEX(i
));
547 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_TEX(s
, i
));
548 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
551 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], view
);
554 for (i
= nr
; i
< nvc0
->num_textures
[s
]; ++i
) {
555 struct nv50_tic_entry
*old
= nv50_tic_entry(nvc0
->textures
[s
][i
]);
558 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_TEX(i
));
560 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_TEX(s
, i
));
561 nvc0_screen_tic_unlock(nvc0
->screen
, old
);
562 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
566 nvc0
->num_textures
[s
] = nr
;
570 nvc0_set_sampler_views(struct pipe_context
*pipe
, enum pipe_shader_type shader
,
571 unsigned start
, unsigned nr
,
572 struct pipe_sampler_view
**views
)
574 const unsigned s
= nvc0_shader_stage(shader
);
577 nvc0_stage_set_sampler_views(nvc0_context(pipe
), s
, nr
, views
);
580 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_TEXTURES
;
582 nvc0_context(pipe
)->dirty_3d
|= NVC0_NEW_3D_TEXTURES
;
585 /* ============================= SHADERS =======================================
589 nvc0_sp_state_create(struct pipe_context
*pipe
,
590 const struct pipe_shader_state
*cso
, unsigned type
)
592 struct nvc0_program
*prog
;
594 prog
= CALLOC_STRUCT(nvc0_program
);
599 prog
->pipe
.type
= cso
->type
;
602 case PIPE_SHADER_IR_TGSI
:
603 prog
->pipe
.tokens
= tgsi_dup_tokens(cso
->tokens
);
605 case PIPE_SHADER_IR_NIR
:
606 prog
->pipe
.ir
.nir
= cso
->ir
.nir
;
609 assert(!"unsupported IR!");
614 if (cso
->stream_output
.num_outputs
)
615 prog
->pipe
.stream_output
= cso
->stream_output
;
617 prog
->translated
= nvc0_program_translate(
618 prog
, nvc0_context(pipe
)->screen
->base
.device
->chipset
,
619 &nouveau_context(pipe
)->debug
);
625 nvc0_sp_state_delete(struct pipe_context
*pipe
, void *hwcso
)
627 struct nvc0_program
*prog
= (struct nvc0_program
*)hwcso
;
629 nvc0_program_destroy(nvc0_context(pipe
), prog
);
631 if (prog
->pipe
.type
== PIPE_SHADER_IR_TGSI
)
632 FREE((void *)prog
->pipe
.tokens
);
633 else if (prog
->pipe
.type
== PIPE_SHADER_IR_NIR
)
634 ralloc_free(prog
->pipe
.ir
.nir
);
639 nvc0_vp_state_create(struct pipe_context
*pipe
,
640 const struct pipe_shader_state
*cso
)
642 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_VERTEX
);
646 nvc0_vp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
648 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
650 nvc0
->vertprog
= hwcso
;
651 nvc0
->dirty_3d
|= NVC0_NEW_3D_VERTPROG
;
655 nvc0_fp_state_create(struct pipe_context
*pipe
,
656 const struct pipe_shader_state
*cso
)
658 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_FRAGMENT
);
662 nvc0_fp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
664 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
666 nvc0
->fragprog
= hwcso
;
667 nvc0
->dirty_3d
|= NVC0_NEW_3D_FRAGPROG
;
671 nvc0_gp_state_create(struct pipe_context
*pipe
,
672 const struct pipe_shader_state
*cso
)
674 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_GEOMETRY
);
678 nvc0_gp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
680 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
682 nvc0
->gmtyprog
= hwcso
;
683 nvc0
->dirty_3d
|= NVC0_NEW_3D_GMTYPROG
;
687 nvc0_tcp_state_create(struct pipe_context
*pipe
,
688 const struct pipe_shader_state
*cso
)
690 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_TESS_CTRL
);
694 nvc0_tcp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
696 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
698 nvc0
->tctlprog
= hwcso
;
699 nvc0
->dirty_3d
|= NVC0_NEW_3D_TCTLPROG
;
703 nvc0_tep_state_create(struct pipe_context
*pipe
,
704 const struct pipe_shader_state
*cso
)
706 return nvc0_sp_state_create(pipe
, cso
, PIPE_SHADER_TESS_EVAL
);
710 nvc0_tep_state_bind(struct pipe_context
*pipe
, void *hwcso
)
712 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
714 nvc0
->tevlprog
= hwcso
;
715 nvc0
->dirty_3d
|= NVC0_NEW_3D_TEVLPROG
;
719 nvc0_cp_state_create(struct pipe_context
*pipe
,
720 const struct pipe_compute_state
*cso
)
722 struct nvc0_program
*prog
;
724 prog
= CALLOC_STRUCT(nvc0_program
);
727 prog
->type
= PIPE_SHADER_COMPUTE
;
728 prog
->pipe
.type
= cso
->ir_type
;
730 prog
->cp
.smem_size
= cso
->req_local_mem
;
731 prog
->cp
.lmem_size
= cso
->req_private_mem
;
732 prog
->parm_size
= cso
->req_input_mem
;
734 switch(cso
->ir_type
) {
735 case PIPE_SHADER_IR_TGSI
:
736 prog
->pipe
.tokens
= tgsi_dup_tokens((const struct tgsi_token
*)cso
->prog
);
738 case PIPE_SHADER_IR_NIR
:
739 prog
->pipe
.ir
.nir
= (nir_shader
*)cso
->prog
;
741 case PIPE_SHADER_IR_NIR_SERIALIZED
: {
742 struct blob_reader reader
;
743 const struct pipe_binary_program_header
*hdr
= cso
->prog
;
745 blob_reader_init(&reader
, hdr
->blob
, hdr
->num_bytes
);
746 prog
->pipe
.ir
.nir
= nir_deserialize(NULL
, pipe
->screen
->get_compiler_options(pipe
->screen
, PIPE_SHADER_IR_NIR
, PIPE_SHADER_COMPUTE
), &reader
);
747 prog
->pipe
.type
= PIPE_SHADER_IR_NIR
;
751 assert(!"unsupported IR!");
756 prog
->translated
= nvc0_program_translate(
757 prog
, nvc0_context(pipe
)->screen
->base
.device
->chipset
,
758 &nouveau_context(pipe
)->debug
);
764 nvc0_cp_state_bind(struct pipe_context
*pipe
, void *hwcso
)
766 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
768 nvc0
->compprog
= hwcso
;
769 nvc0
->dirty_cp
|= NVC0_NEW_CP_PROGRAM
;
773 nvc0_set_constant_buffer(struct pipe_context
*pipe
,
774 enum pipe_shader_type shader
, uint index
,
775 const struct pipe_constant_buffer
*cb
)
777 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
778 struct pipe_resource
*res
= cb
? cb
->buffer
: NULL
;
779 const unsigned s
= nvc0_shader_stage(shader
);
780 const unsigned i
= index
;
782 if (unlikely(shader
== PIPE_SHADER_COMPUTE
)) {
783 if (nvc0
->constbuf
[s
][i
].user
)
784 nvc0
->constbuf
[s
][i
].u
.buf
= NULL
;
786 if (nvc0
->constbuf
[s
][i
].u
.buf
)
787 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_CB(i
));
789 nvc0
->dirty_cp
|= NVC0_NEW_CP_CONSTBUF
;
791 if (nvc0
->constbuf
[s
][i
].user
)
792 nvc0
->constbuf
[s
][i
].u
.buf
= NULL
;
794 if (nvc0
->constbuf
[s
][i
].u
.buf
)
795 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_CB(s
, i
));
797 nvc0
->dirty_3d
|= NVC0_NEW_3D_CONSTBUF
;
799 nvc0
->constbuf_dirty
[s
] |= 1 << i
;
801 if (nvc0
->constbuf
[s
][i
].u
.buf
)
802 nv04_resource(nvc0
->constbuf
[s
][i
].u
.buf
)->cb_bindings
[s
] &= ~(1 << i
);
803 pipe_resource_reference(&nvc0
->constbuf
[s
][i
].u
.buf
, res
);
805 nvc0
->constbuf
[s
][i
].user
= (cb
&& cb
->user_buffer
) ? true : false;
806 if (nvc0
->constbuf
[s
][i
].user
) {
807 nvc0
->constbuf
[s
][i
].u
.data
= cb
->user_buffer
;
808 nvc0
->constbuf
[s
][i
].size
= MIN2(cb
->buffer_size
, 0x10000);
809 nvc0
->constbuf_valid
[s
] |= 1 << i
;
810 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
813 nvc0
->constbuf
[s
][i
].offset
= cb
->buffer_offset
;
814 nvc0
->constbuf
[s
][i
].size
= MIN2(align(cb
->buffer_size
, 0x100), 0x10000);
815 nvc0
->constbuf_valid
[s
] |= 1 << i
;
816 if (res
&& res
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
817 nvc0
->constbuf_coherent
[s
] |= 1 << i
;
819 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
822 nvc0
->constbuf_valid
[s
] &= ~(1 << i
);
823 nvc0
->constbuf_coherent
[s
] &= ~(1 << i
);
827 /* =============================================================================
831 nvc0_set_blend_color(struct pipe_context
*pipe
,
832 const struct pipe_blend_color
*bcol
)
834 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
836 nvc0
->blend_colour
= *bcol
;
837 nvc0
->dirty_3d
|= NVC0_NEW_3D_BLEND_COLOUR
;
841 nvc0_set_stencil_ref(struct pipe_context
*pipe
,
842 const struct pipe_stencil_ref
*sr
)
844 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
846 nvc0
->stencil_ref
= *sr
;
847 nvc0
->dirty_3d
|= NVC0_NEW_3D_STENCIL_REF
;
851 nvc0_set_clip_state(struct pipe_context
*pipe
,
852 const struct pipe_clip_state
*clip
)
854 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
856 memcpy(nvc0
->clip
.ucp
, clip
->ucp
, sizeof(clip
->ucp
));
858 nvc0
->dirty_3d
|= NVC0_NEW_3D_CLIP
;
862 nvc0_set_sample_mask(struct pipe_context
*pipe
, unsigned sample_mask
)
864 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
866 nvc0
->sample_mask
= sample_mask
;
867 nvc0
->dirty_3d
|= NVC0_NEW_3D_SAMPLE_MASK
;
871 nvc0_set_min_samples(struct pipe_context
*pipe
, unsigned min_samples
)
873 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
875 if (nvc0
->min_samples
!= min_samples
) {
876 nvc0
->min_samples
= min_samples
;
877 nvc0
->dirty_3d
|= NVC0_NEW_3D_MIN_SAMPLES
;
882 nvc0_set_framebuffer_state(struct pipe_context
*pipe
,
883 const struct pipe_framebuffer_state
*fb
)
885 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
887 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_FB
);
889 util_copy_framebuffer_state(&nvc0
->framebuffer
, fb
);
891 nvc0
->dirty_3d
|= NVC0_NEW_3D_FRAMEBUFFER
| NVC0_NEW_3D_SAMPLE_LOCATIONS
|
892 NVC0_NEW_3D_TEXTURES
;
893 nvc0
->dirty_cp
|= NVC0_NEW_CP_TEXTURES
;
897 nvc0_set_sample_locations(struct pipe_context
*pipe
,
898 size_t size
, const uint8_t *locations
)
900 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
902 nvc0
->sample_locations_enabled
= size
&& locations
;
903 if (size
> sizeof(nvc0
->sample_locations
))
904 size
= sizeof(nvc0
->sample_locations
);
905 memcpy(nvc0
->sample_locations
, locations
, size
);
907 nvc0
->dirty_3d
|= NVC0_NEW_3D_SAMPLE_LOCATIONS
;
911 nvc0_set_polygon_stipple(struct pipe_context
*pipe
,
912 const struct pipe_poly_stipple
*stipple
)
914 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
916 nvc0
->stipple
= *stipple
;
917 nvc0
->dirty_3d
|= NVC0_NEW_3D_STIPPLE
;
921 nvc0_set_scissor_states(struct pipe_context
*pipe
,
923 unsigned num_scissors
,
924 const struct pipe_scissor_state
*scissor
)
926 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
929 assert(start_slot
+ num_scissors
<= NVC0_MAX_VIEWPORTS
);
930 for (i
= 0; i
< num_scissors
; i
++) {
931 if (!memcmp(&nvc0
->scissors
[start_slot
+ i
], &scissor
[i
], sizeof(*scissor
)))
933 nvc0
->scissors
[start_slot
+ i
] = scissor
[i
];
934 nvc0
->scissors_dirty
|= 1 << (start_slot
+ i
);
935 nvc0
->dirty_3d
|= NVC0_NEW_3D_SCISSOR
;
940 nvc0_set_viewport_states(struct pipe_context
*pipe
,
942 unsigned num_viewports
,
943 const struct pipe_viewport_state
*vpt
)
945 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
948 assert(start_slot
+ num_viewports
<= NVC0_MAX_VIEWPORTS
);
949 for (i
= 0; i
< num_viewports
; i
++) {
950 if (!memcmp(&nvc0
->viewports
[start_slot
+ i
], &vpt
[i
], sizeof(*vpt
)))
952 nvc0
->viewports
[start_slot
+ i
] = vpt
[i
];
953 nvc0
->viewports_dirty
|= 1 << (start_slot
+ i
);
954 nvc0
->dirty_3d
|= NVC0_NEW_3D_VIEWPORT
;
960 nvc0_set_window_rectangles(struct pipe_context
*pipe
,
962 unsigned num_rectangles
,
963 const struct pipe_scissor_state
*rectangles
)
965 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
967 nvc0
->window_rect
.inclusive
= include
;
968 nvc0
->window_rect
.rects
= MIN2(num_rectangles
, NVC0_MAX_WINDOW_RECTANGLES
);
969 memcpy(nvc0
->window_rect
.rect
, rectangles
,
970 sizeof(struct pipe_scissor_state
) * nvc0
->window_rect
.rects
);
972 nvc0
->dirty_3d
|= NVC0_NEW_3D_WINDOW_RECTS
;
976 nvc0_set_tess_state(struct pipe_context
*pipe
,
977 const float default_tess_outer
[4],
978 const float default_tess_inner
[2])
980 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
982 memcpy(nvc0
->default_tess_outer
, default_tess_outer
, 4 * sizeof(float));
983 memcpy(nvc0
->default_tess_inner
, default_tess_inner
, 2 * sizeof(float));
984 nvc0
->dirty_3d
|= NVC0_NEW_3D_TESSFACTOR
;
988 nvc0_set_vertex_buffers(struct pipe_context
*pipe
,
989 unsigned start_slot
, unsigned count
,
990 const struct pipe_vertex_buffer
*vb
)
992 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
995 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_VTX
);
996 nvc0
->dirty_3d
|= NVC0_NEW_3D_ARRAYS
;
998 util_set_vertex_buffers_count(nvc0
->vtxbuf
, &nvc0
->num_vtxbufs
, vb
,
1002 nvc0
->vbo_user
&= ~(((1ull << count
) - 1) << start_slot
);
1003 nvc0
->constant_vbos
&= ~(((1ull << count
) - 1) << start_slot
);
1004 nvc0
->vtxbufs_coherent
&= ~(((1ull << count
) - 1) << start_slot
);
1008 for (i
= 0; i
< count
; ++i
) {
1009 unsigned dst_index
= start_slot
+ i
;
1011 if (vb
[i
].is_user_buffer
) {
1012 nvc0
->vbo_user
|= 1 << dst_index
;
1013 if (!vb
[i
].stride
&& nvc0
->screen
->eng3d
->oclass
< GM107_3D_CLASS
)
1014 nvc0
->constant_vbos
|= 1 << dst_index
;
1016 nvc0
->constant_vbos
&= ~(1 << dst_index
);
1017 nvc0
->vtxbufs_coherent
&= ~(1 << dst_index
);
1019 nvc0
->vbo_user
&= ~(1 << dst_index
);
1020 nvc0
->constant_vbos
&= ~(1 << dst_index
);
1022 if (vb
[i
].buffer
.resource
&&
1023 vb
[i
].buffer
.resource
->flags
& PIPE_RESOURCE_FLAG_MAP_COHERENT
)
1024 nvc0
->vtxbufs_coherent
|= (1 << dst_index
);
1026 nvc0
->vtxbufs_coherent
&= ~(1 << dst_index
);
1032 nvc0_vertex_state_bind(struct pipe_context
*pipe
, void *hwcso
)
1034 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1036 nvc0
->vertex
= hwcso
;
1037 nvc0
->dirty_3d
|= NVC0_NEW_3D_VERTEX
;
1040 static struct pipe_stream_output_target
*
1041 nvc0_so_target_create(struct pipe_context
*pipe
,
1042 struct pipe_resource
*res
,
1043 unsigned offset
, unsigned size
)
1045 struct nv04_resource
*buf
= (struct nv04_resource
*)res
;
1046 struct nvc0_so_target
*targ
= MALLOC_STRUCT(nvc0_so_target
);
1050 targ
->pq
= pipe
->create_query(pipe
, NVC0_HW_QUERY_TFB_BUFFER_OFFSET
, 0);
1057 targ
->pipe
.buffer_size
= size
;
1058 targ
->pipe
.buffer_offset
= offset
;
1059 targ
->pipe
.context
= pipe
;
1060 targ
->pipe
.buffer
= NULL
;
1061 pipe_resource_reference(&targ
->pipe
.buffer
, res
);
1062 pipe_reference_init(&targ
->pipe
.reference
, 1);
1064 assert(buf
->base
.target
== PIPE_BUFFER
);
1065 util_range_add(&buf
->base
, &buf
->valid_buffer_range
, offset
, offset
+ size
);
1071 nvc0_so_target_save_offset(struct pipe_context
*pipe
,
1072 struct pipe_stream_output_target
*ptarg
,
1073 unsigned index
, bool *serialize
)
1075 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
1079 PUSH_SPACE(nvc0_context(pipe
)->base
.pushbuf
, 1);
1080 IMMED_NVC0(nvc0_context(pipe
)->base
.pushbuf
, NVC0_3D(SERIALIZE
), 0);
1082 NOUVEAU_DRV_STAT(nouveau_screen(pipe
->screen
), gpu_serialize_count
, 1);
1085 nvc0_query(targ
->pq
)->index
= index
;
1086 pipe
->end_query(pipe
, targ
->pq
);
1090 nvc0_so_target_destroy(struct pipe_context
*pipe
,
1091 struct pipe_stream_output_target
*ptarg
)
1093 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
1094 pipe
->destroy_query(pipe
, targ
->pq
);
1095 pipe_resource_reference(&targ
->pipe
.buffer
, NULL
);
1100 nvc0_set_transform_feedback_targets(struct pipe_context
*pipe
,
1101 unsigned num_targets
,
1102 struct pipe_stream_output_target
**targets
,
1103 const unsigned *offsets
)
1105 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
1107 bool serialize
= true;
1109 assert(num_targets
<= 4);
1111 for (i
= 0; i
< num_targets
; ++i
) {
1112 const bool changed
= nvc0
->tfbbuf
[i
] != targets
[i
];
1113 const bool append
= (offsets
[i
] == ((unsigned)-1));
1114 if (!changed
&& append
)
1116 nvc0
->tfbbuf_dirty
|= 1 << i
;
1118 if (nvc0
->tfbbuf
[i
] && changed
)
1119 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1121 if (targets
[i
] && !append
)
1122 nvc0_so_target(targets
[i
])->clean
= true;
1124 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], targets
[i
]);
1126 for (; i
< nvc0
->num_tfbbufs
; ++i
) {
1127 if (nvc0
->tfbbuf
[i
]) {
1128 nvc0
->tfbbuf_dirty
|= 1 << i
;
1129 nvc0_so_target_save_offset(pipe
, nvc0
->tfbbuf
[i
], i
, &serialize
);
1130 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], NULL
);
1133 nvc0
->num_tfbbufs
= num_targets
;
1135 if (nvc0
->tfbbuf_dirty
) {
1136 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_TFB
);
1137 nvc0
->dirty_3d
|= NVC0_NEW_3D_TFB_TARGETS
;
/* Bind (or unbind, when @psurfaces is NULL) a contiguous range of surface
 * slots [start, start+nr) for surface set @t, updating the per-set valid
 * and dirty bitmasks and resetting the affected bufctx binding.
 *
 * NOTE(review): the if (psurfaces) / else split and the t-based choice of
 * bufctx were reconstructed from dropped lines; both alternative bodies
 * are present in the visible code.
 */
static bool
nvc0_bind_surfaces_range(struct nvc0_context *nvc0, const unsigned t,
                         unsigned start, unsigned nr,
                         struct pipe_surface **psurfaces)
{
   const unsigned end = start + nr;
   const unsigned mask = ((1 << nr) - 1) << start;
   unsigned i;

   if (psurfaces) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (psurfaces[p])
            nvc0->surfaces_valid[t] |= (1 << i);
         else
            nvc0->surfaces_valid[t] &= ~(1 << i);
         pipe_surface_reference(&nvc0->surfaces[t][i], psurfaces[p]);
      }
   } else {
      /* Unbind: drop every reference in the range at once. */
      for (i = start; i < end; ++i)
         pipe_surface_reference(&nvc0->surfaces[t][i], NULL);
      nvc0->surfaces_valid[t] &= ~mask;
   }
   nvc0->surfaces_dirty[t] |= mask;

   if (t == 0)
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
   else
      nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);

   return true;
}
1173 nvc0_set_compute_resources(struct pipe_context
*pipe
,
1174 unsigned start
, unsigned nr
,
1175 struct pipe_surface
**resources
)
1177 nvc0_bind_surfaces_range(nvc0_context(pipe
), 1, start
, nr
, resources
);
1179 nvc0_context(pipe
)->dirty_cp
|= NVC0_NEW_CP_SURFACES
;
/* Bind (or unbind, when @pimages is NULL) shader image slots
 * [start, start+nr) for stage @s.  Performs full state diffing: slots whose
 * resource/format/access and buffer- or texture-view parameters are
 * unchanged are skipped.  Returns false when nothing changed so the caller
 * can avoid dirtying state.
 *
 * On GM107+ each image also gets a texture view (TIC entry) so the old one
 * must be unlocked/released before a new one is created.
 *
 * NOTE(review): the continue statements, mask accumulation, the
 * if (pimages) / else split and the s-based bufctx choice were
 * reconstructed from dropped lines; the comparisons and assignments shown
 * are all present in the visible code.
 */
static bool
nvc0_bind_images_range(struct nvc0_context *nvc0, const unsigned s,
                       unsigned start, unsigned nr,
                       const struct pipe_image_view *pimages)
{
   const unsigned end = start + nr;
   unsigned mask = 0;
   unsigned i;

   if (pimages) {
      for (i = start; i < end; ++i) {
         struct pipe_image_view *img = &nvc0->images[s][i];
         const unsigned p = i - start;

         /* Skip slots that are identical to what is already bound. */
         if (img->resource == pimages[p].resource &&
             img->format == pimages[p].format &&
             img->access == pimages[p].access) {
            if (img->resource == NULL)
               continue;
            if (img->resource->target == PIPE_BUFFER &&
                img->u.buf.offset == pimages[p].u.buf.offset &&
                img->u.buf.size == pimages[p].u.buf.size)
               continue;
            if (img->resource->target != PIPE_BUFFER &&
                img->u.tex.first_layer == pimages[p].u.tex.first_layer &&
                img->u.tex.last_layer == pimages[p].u.tex.last_layer &&
                img->u.tex.level == pimages[p].u.tex.level)
               continue;
         }

         mask |= (1 << i);
         if (pimages[p].resource)
            nvc0->images_valid[s] |= (1 << i);
         else
            nvc0->images_valid[s] &= ~(1 << i);

         img->format = pimages[p].format;
         img->access = pimages[p].access;
         /* Buffer images use the buf view; everything else the tex view. */
         if (pimages[p].resource && pimages[p].resource->target == PIPE_BUFFER)
            img->u.buf = pimages[p].u.buf;
         else
            img->u.tex = pimages[p].u.tex;

         pipe_resource_reference(
               &img->resource, pimages[p].resource);

         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS) {
            /* Release the previous TIC entry for this slot, if any. */
            if (nvc0->images_tic[s][i]) {
               struct nv50_tic_entry *old =
                  nv50_tic_entry(nvc0->images_tic[s][i]);
               nvc0_screen_tic_unlock(nvc0->screen, old);
               pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
            }

            nvc0->images_tic[s][i] =
               gm107_create_texture_view_from_image(&nvc0->base.pipe,
                                                    &pimages[p]);
         }
      }
      if (!mask)
         return false;
   } else {
      mask = ((1 << nr) - 1) << start;
      if (!(nvc0->images_valid[s] & mask))
         return false;
      for (i = start; i < end; ++i) {
         pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS) {
            struct nv50_tic_entry *old = nv50_tic_entry(nvc0->images_tic[s][i]);
            if (old) {
               nvc0_screen_tic_unlock(nvc0->screen, old);
               pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
            }
         }
      }
      nvc0->images_valid[s] &= ~mask;
   }
   nvc0->images_dirty[s] |= mask;

   /* NOTE(review): stage index selecting compute vs 3D bufctx reconstructed
    * — presumably s == 5 is the compute stage; verify against
    * nvc0_shader_stage. */
   if (s == 5)
      nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
   else
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);

   return true;
}
/* pipe_context::set_shader_images — forward to the range binder and, only
 * if something actually changed, mark the matching (compute or 3D) dirty
 * bit for surface revalidation.
 *
 * NOTE(review): the early return and the s == 5 compute/3D split were
 * reconstructed from dropped lines; both dirty assignments appear in the
 * visible code.
 */
static void
nvc0_set_shader_images(struct pipe_context *pipe,
                       enum pipe_shader_type shader,
                       unsigned start, unsigned nr,
                       const struct pipe_image_view *images)
{
   const unsigned s = nvc0_shader_stage(shader);
   if (!nvc0_bind_images_range(nvc0_context(pipe), s, start, nr, images))
      return;

   if (s == 5)
      nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SURFACES;
   else
      nvc0_context(pipe)->dirty_3d |= NVC0_NEW_3D_SURFACES;
}
/* Bind (or unbind, when @pbuffers is NULL) shader buffer (SSBO) slots
 * [start, start+nr) for stage @t.  Slots whose buffer, offset and size are
 * unchanged are skipped; returns false when nothing changed at all.
 *
 * NOTE(review): the continue, the mask accumulation, the
 * if (pbuffers) / else split and the t-based bufctx choice were
 * reconstructed from dropped lines.
 */
static bool
nvc0_bind_buffers_range(struct nvc0_context *nvc0, const unsigned t,
                        unsigned start, unsigned nr,
                        const struct pipe_shader_buffer *pbuffers)
{
   const unsigned end = start + nr;
   unsigned mask = 0;
   unsigned i;

   if (pbuffers) {
      for (i = start; i < end; ++i) {
         struct pipe_shader_buffer *buf = &nvc0->buffers[t][i];
         const unsigned p = i - start;
         /* Identical binding — nothing to do for this slot. */
         if (buf->buffer == pbuffers[p].buffer &&
             buf->buffer_offset == pbuffers[p].buffer_offset &&
             buf->buffer_size == pbuffers[p].buffer_size)
            continue;

         mask |= (1 << i);
         if (pbuffers[p].buffer)
            nvc0->buffers_valid[t] |= (1 << i);
         else
            nvc0->buffers_valid[t] &= ~(1 << i);
         buf->buffer_offset = pbuffers[p].buffer_offset;
         buf->buffer_size = pbuffers[p].buffer_size;
         pipe_resource_reference(&buf->buffer, pbuffers[p].buffer);
      }
      if (!mask)
         return false;
   } else {
      mask = ((1 << nr) - 1) << start;
      if (!(nvc0->buffers_valid[t] & mask))
         return false;
      for (i = start; i < end; ++i)
         pipe_resource_reference(&nvc0->buffers[t][i].buffer, NULL);
      nvc0->buffers_valid[t] &= ~mask;
   }
   nvc0->buffers_dirty[t] |= mask;

   /* NOTE(review): t == 5 assumed to be the compute stage — verify. */
   if (t == 5)
      nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_BUF);
   else
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_BUF);

   return true;
}
/* pipe_context::set_shader_buffers — forward to the range binder and, only
 * when something changed, mark the matching (compute or 3D) buffers dirty
 * bit.  @writable_bitmask is accepted per the interface but unused here.
 *
 * NOTE(review): early return and the s == 5 compute/3D split reconstructed
 * from dropped lines.
 */
static void
nvc0_set_shader_buffers(struct pipe_context *pipe,
                        enum pipe_shader_type shader,
                        unsigned start, unsigned nr,
                        const struct pipe_shader_buffer *buffers,
                        unsigned writable_bitmask)
{
   const unsigned s = nvc0_shader_stage(shader);
   if (!nvc0_bind_buffers_range(nvc0_context(pipe), s, start, nr, buffers))
      return;

   if (s == 5)
      nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_BUFFERS;
   else
      nvc0_context(pipe)->dirty_3d |= NVC0_NEW_3D_BUFFERS;
}
/* Store a 32-bit GPU address handle for a global (compute) resource.
 *
 * The handle is the buffer's base GPU virtual address truncated to 32 bits,
 * which only works if the whole buffer lies below the 4 GiB boundary;
 * otherwise an error is logged.
 *
 * NOTE(review): the NULL-resource guard and the *phandle = 0 fallbacks in
 * the error/NULL paths were reconstructed from dropped lines.
 */
static void
nvc0_set_global_handle(uint32_t *phandle, struct pipe_resource *res)
{
   struct nv04_resource *buf = nv04_resource(res);
   if (res) {
      /* Address of the last byte of the buffer. */
      uint64_t limit = (buf->address + buf->base.width0) - 1;
      if (limit < (1ULL << 32)) {
         *phandle = (uint32_t)buf->address;
      } else {
         NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
                     "resource not contained within 32-bit address space !\n");
         *phandle = 0;
      }
   } else {
      *phandle = 0;
   }
}
/* pipe_context::set_global_binding — pin (or release, when @resources is
 * NULL) a range of global compute resources and write back their 32-bit
 * GPU address handles.
 *
 * The pinned resources live in the nvc0->global_residents dynarray, which
 * is grown (and the new tail zeroed so stale pointers are never
 * dereferenced) whenever the requested range exceeds its current size.
 *
 * NOTE(review): the @handles parameter, the if (resources) / else split
 * and the early return on resize failure were reconstructed from dropped
 * lines; handles[i] is clearly consumed in the visible code.
 */
static void
nvc0_set_global_bindings(struct pipe_context *pipe,
                         unsigned start, unsigned nr,
                         struct pipe_resource **resources,
                         uint32_t **handles)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct pipe_resource **ptr;
   unsigned i;
   const unsigned end = start + nr;

   /* Grow the residents array if the requested range does not fit. */
   if (nvc0->global_residents.size <= (end * sizeof(struct pipe_resource *))) {
      const unsigned old_size = nvc0->global_residents.size;
      if (util_dynarray_resize(&nvc0->global_residents, struct pipe_resource *,
                               end)) {
         /* Zero the newly added tail so unbound slots hold NULL. */
         memset((uint8_t *)nvc0->global_residents.data + old_size, 0,
                nvc0->global_residents.size - old_size);
      } else {
         NOUVEAU_ERR("Could not resize global residents array\n");
         return;
      }
   }

   if (resources) {
      ptr = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, start);
      for (i = 0; i < nr; ++i) {
         pipe_resource_reference(&ptr[i], resources[i]);
         nvc0_set_global_handle(handles[i], resources[i]);
      }
   } else {
      ptr = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, start);
      for (i = 0; i < nr; ++i)
         pipe_resource_reference(&ptr[i], NULL);
   }

   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL);

   nvc0->dirty_cp |= NVC0_NEW_CP_GLOBALS;
}
1416 nvc0_init_state_functions(struct nvc0_context
*nvc0
)
1418 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
1420 pipe
->create_blend_state
= nvc0_blend_state_create
;
1421 pipe
->bind_blend_state
= nvc0_blend_state_bind
;
1422 pipe
->delete_blend_state
= nvc0_blend_state_delete
;
1424 pipe
->create_rasterizer_state
= nvc0_rasterizer_state_create
;
1425 pipe
->bind_rasterizer_state
= nvc0_rasterizer_state_bind
;
1426 pipe
->delete_rasterizer_state
= nvc0_rasterizer_state_delete
;
1428 pipe
->create_depth_stencil_alpha_state
= nvc0_zsa_state_create
;
1429 pipe
->bind_depth_stencil_alpha_state
= nvc0_zsa_state_bind
;
1430 pipe
->delete_depth_stencil_alpha_state
= nvc0_zsa_state_delete
;
1432 pipe
->create_sampler_state
= nv50_sampler_state_create
;
1433 pipe
->delete_sampler_state
= nvc0_sampler_state_delete
;
1434 pipe
->bind_sampler_states
= nvc0_bind_sampler_states
;
1436 pipe
->create_sampler_view
= nvc0_create_sampler_view
;
1437 pipe
->sampler_view_destroy
= nvc0_sampler_view_destroy
;
1438 pipe
->set_sampler_views
= nvc0_set_sampler_views
;
1440 pipe
->create_vs_state
= nvc0_vp_state_create
;
1441 pipe
->create_fs_state
= nvc0_fp_state_create
;
1442 pipe
->create_gs_state
= nvc0_gp_state_create
;
1443 pipe
->create_tcs_state
= nvc0_tcp_state_create
;
1444 pipe
->create_tes_state
= nvc0_tep_state_create
;
1445 pipe
->bind_vs_state
= nvc0_vp_state_bind
;
1446 pipe
->bind_fs_state
= nvc0_fp_state_bind
;
1447 pipe
->bind_gs_state
= nvc0_gp_state_bind
;
1448 pipe
->bind_tcs_state
= nvc0_tcp_state_bind
;
1449 pipe
->bind_tes_state
= nvc0_tep_state_bind
;
1450 pipe
->delete_vs_state
= nvc0_sp_state_delete
;
1451 pipe
->delete_fs_state
= nvc0_sp_state_delete
;
1452 pipe
->delete_gs_state
= nvc0_sp_state_delete
;
1453 pipe
->delete_tcs_state
= nvc0_sp_state_delete
;
1454 pipe
->delete_tes_state
= nvc0_sp_state_delete
;
1456 pipe
->create_compute_state
= nvc0_cp_state_create
;
1457 pipe
->bind_compute_state
= nvc0_cp_state_bind
;
1458 pipe
->delete_compute_state
= nvc0_sp_state_delete
;
1460 pipe
->set_blend_color
= nvc0_set_blend_color
;
1461 pipe
->set_stencil_ref
= nvc0_set_stencil_ref
;
1462 pipe
->set_clip_state
= nvc0_set_clip_state
;
1463 pipe
->set_sample_mask
= nvc0_set_sample_mask
;
1464 pipe
->set_min_samples
= nvc0_set_min_samples
;
1465 pipe
->set_constant_buffer
= nvc0_set_constant_buffer
;
1466 pipe
->set_framebuffer_state
= nvc0_set_framebuffer_state
;
1467 pipe
->set_sample_locations
= nvc0_set_sample_locations
;
1468 pipe
->set_polygon_stipple
= nvc0_set_polygon_stipple
;
1469 pipe
->set_scissor_states
= nvc0_set_scissor_states
;
1470 pipe
->set_viewport_states
= nvc0_set_viewport_states
;
1471 pipe
->set_window_rectangles
= nvc0_set_window_rectangles
;
1472 pipe
->set_tess_state
= nvc0_set_tess_state
;
1474 pipe
->create_vertex_elements_state
= nvc0_vertex_state_create
;
1475 pipe
->delete_vertex_elements_state
= nvc0_vertex_state_delete
;
1476 pipe
->bind_vertex_elements_state
= nvc0_vertex_state_bind
;
1478 pipe
->set_vertex_buffers
= nvc0_set_vertex_buffers
;
1480 pipe
->create_stream_output_target
= nvc0_so_target_create
;
1481 pipe
->stream_output_target_destroy
= nvc0_so_target_destroy
;
1482 pipe
->set_stream_output_targets
= nvc0_set_transform_feedback_targets
;
1484 pipe
->set_global_binding
= nvc0_set_global_bindings
;
1485 pipe
->set_compute_resources
= nvc0_set_compute_resources
;
1486 pipe
->set_shader_images
= nvc0_set_shader_images
;
1487 pipe
->set_shader_buffers
= nvc0_set_shader_buffers
;
1489 nvc0
->sample_mask
= ~0;
1490 nvc0
->min_samples
= 1;
1491 nvc0
->default_tess_outer
[0] =
1492 nvc0
->default_tess_outer
[1] =
1493 nvc0
->default_tess_outer
[2] =
1494 nvc0
->default_tess_outer
[3] = 1.0;
1495 nvc0
->default_tess_inner
[0] =
1496 nvc0
->default_tess_inner
[1] = 1.0;