/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_screen.h"
#include "nv50/nv50_resource.h"
31 nv50_flush(struct pipe_context
*pipe
,
32 struct pipe_fence_handle
**fence
,
35 struct nouveau_screen
*screen
= nouveau_screen(pipe
->screen
);
38 nouveau_fence_ref(screen
->fence
.current
, (struct nouveau_fence
**)fence
);
40 PUSH_KICK(screen
->pushbuf
);
42 nouveau_context_update_frame_stats(nouveau_context(pipe
));
46 nv50_texture_barrier(struct pipe_context
*pipe
)
48 struct nouveau_pushbuf
*push
= nv50_context(pipe
)->base
.pushbuf
;
50 BEGIN_NV04(push
, SUBC_3D(NV50_GRAPH_SERIALIZE
), 1);
52 BEGIN_NV04(push
, NV50_3D(TEX_CACHE_CTL
), 1);
53 PUSH_DATA (push
, 0x20);
57 nv50_memory_barrier(struct pipe_context
*pipe
, unsigned flags
)
59 struct nv50_context
*nv50
= nv50_context(pipe
);
62 if (flags
& PIPE_BARRIER_MAPPED_BUFFER
) {
63 for (i
= 0; i
< nv50
->num_vtxbufs
; ++i
) {
64 if (!nv50
->vtxbuf
[i
].buffer
)
66 if (nv50
->vtxbuf
[i
].buffer
->flags
& PIPE_RESOURCE_FLAG_MAP_PERSISTENT
)
67 nv50
->base
.vbo_dirty
= true;
70 if (nv50
->idxbuf
.buffer
&&
71 nv50
->idxbuf
.buffer
->flags
& PIPE_RESOURCE_FLAG_MAP_PERSISTENT
)
72 nv50
->base
.vbo_dirty
= true;
74 for (s
= 0; s
< 3 && !nv50
->cb_dirty
; ++s
) {
75 uint32_t valid
= nv50
->constbuf_valid
[s
];
77 while (valid
&& !nv50
->cb_dirty
) {
78 const unsigned i
= ffs(valid
) - 1;
79 struct pipe_resource
*res
;
82 if (nv50
->constbuf
[s
][i
].user
)
85 res
= nv50
->constbuf
[s
][i
].u
.buf
;
89 if (res
->flags
& PIPE_RESOURCE_FLAG_MAP_PERSISTENT
)
90 nv50
->cb_dirty
= true;
97 nv50_default_kick_notify(struct nouveau_pushbuf
*push
)
99 struct nv50_screen
*screen
= push
->user_priv
;
102 nouveau_fence_next(&screen
->base
);
103 nouveau_fence_update(&screen
->base
, true);
105 screen
->cur_ctx
->state
.flushed
= true;
110 nv50_context_unreference_resources(struct nv50_context
*nv50
)
114 nouveau_bufctx_del(&nv50
->bufctx_3d
);
115 nouveau_bufctx_del(&nv50
->bufctx
);
116 nouveau_bufctx_del(&nv50
->bufctx_cp
);
118 util_unreference_framebuffer_state(&nv50
->framebuffer
);
120 assert(nv50
->num_vtxbufs
<= PIPE_MAX_ATTRIBS
);
121 for (i
= 0; i
< nv50
->num_vtxbufs
; ++i
)
122 pipe_resource_reference(&nv50
->vtxbuf
[i
].buffer
, NULL
);
124 pipe_resource_reference(&nv50
->idxbuf
.buffer
, NULL
);
126 for (s
= 0; s
< 3; ++s
) {
127 assert(nv50
->num_textures
[s
] <= PIPE_MAX_SAMPLERS
);
128 for (i
= 0; i
< nv50
->num_textures
[s
]; ++i
)
129 pipe_sampler_view_reference(&nv50
->textures
[s
][i
], NULL
);
131 for (i
= 0; i
< NV50_MAX_PIPE_CONSTBUFS
; ++i
)
132 if (!nv50
->constbuf
[s
][i
].user
)
133 pipe_resource_reference(&nv50
->constbuf
[s
][i
].u
.buf
, NULL
);
136 for (i
= 0; i
< nv50
->global_residents
.size
/ sizeof(struct pipe_resource
*);
138 struct pipe_resource
**res
= util_dynarray_element(
139 &nv50
->global_residents
, struct pipe_resource
*, i
);
140 pipe_resource_reference(res
, NULL
);
142 util_dynarray_fini(&nv50
->global_residents
);
146 nv50_destroy(struct pipe_context
*pipe
)
148 struct nv50_context
*nv50
= nv50_context(pipe
);
150 if (nv50
->screen
->cur_ctx
== nv50
) {
151 nv50
->screen
->cur_ctx
= NULL
;
152 /* Save off the state in case another context gets created */
153 nv50
->screen
->save_state
= nv50
->state
;
155 nouveau_pushbuf_bufctx(nv50
->base
.pushbuf
, NULL
);
156 nouveau_pushbuf_kick(nv50
->base
.pushbuf
, nv50
->base
.pushbuf
->channel
);
158 nv50_context_unreference_resources(nv50
);
162 nouveau_context_destroy(&nv50
->base
);
166 nv50_invalidate_resource_storage(struct nouveau_context
*ctx
,
167 struct pipe_resource
*res
,
170 struct nv50_context
*nv50
= nv50_context(&ctx
->pipe
);
171 unsigned bind
= res
->bind
? res
->bind
: PIPE_BIND_VERTEX_BUFFER
;
174 if (bind
& PIPE_BIND_RENDER_TARGET
) {
175 assert(nv50
->framebuffer
.nr_cbufs
<= PIPE_MAX_COLOR_BUFS
);
176 for (i
= 0; i
< nv50
->framebuffer
.nr_cbufs
; ++i
) {
177 if (nv50
->framebuffer
.cbufs
[i
] &&
178 nv50
->framebuffer
.cbufs
[i
]->texture
== res
) {
179 nv50
->dirty
|= NV50_NEW_FRAMEBUFFER
;
180 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_FB
);
186 if (bind
& PIPE_BIND_DEPTH_STENCIL
) {
187 if (nv50
->framebuffer
.zsbuf
&&
188 nv50
->framebuffer
.zsbuf
->texture
== res
) {
189 nv50
->dirty
|= NV50_NEW_FRAMEBUFFER
;
190 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_FB
);
196 if (bind
& (PIPE_BIND_VERTEX_BUFFER
|
197 PIPE_BIND_INDEX_BUFFER
|
198 PIPE_BIND_CONSTANT_BUFFER
|
199 PIPE_BIND_STREAM_OUTPUT
|
200 PIPE_BIND_SAMPLER_VIEW
)) {
202 assert(nv50
->num_vtxbufs
<= PIPE_MAX_ATTRIBS
);
203 for (i
= 0; i
< nv50
->num_vtxbufs
; ++i
) {
204 if (nv50
->vtxbuf
[i
].buffer
== res
) {
205 nv50
->dirty
|= NV50_NEW_ARRAYS
;
206 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_VERTEX
);
212 if (nv50
->idxbuf
.buffer
== res
) {
213 /* Just rebind to the bufctx as there is no separate dirty bit */
214 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_INDEX
);
215 BCTX_REFN(nv50
->bufctx_3d
, INDEX
, nv04_resource(res
), RD
);
220 for (s
= 0; s
< 3; ++s
) {
221 assert(nv50
->num_textures
[s
] <= PIPE_MAX_SAMPLERS
);
222 for (i
= 0; i
< nv50
->num_textures
[s
]; ++i
) {
223 if (nv50
->textures
[s
][i
] &&
224 nv50
->textures
[s
][i
]->texture
== res
) {
225 nv50
->dirty
|= NV50_NEW_TEXTURES
;
226 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_TEXTURES
);
233 for (s
= 0; s
< 3; ++s
) {
234 for (i
= 0; i
< NV50_MAX_PIPE_CONSTBUFS
; ++i
) {
235 if (!(nv50
->constbuf_valid
[s
] & (1 << i
)))
237 if (!nv50
->constbuf
[s
][i
].user
&&
238 nv50
->constbuf
[s
][i
].u
.buf
== res
) {
239 nv50
->dirty
|= NV50_NEW_CONSTBUF
;
240 nv50
->constbuf_dirty
[s
] |= 1 << i
;
241 nouveau_bufctx_reset(nv50
->bufctx_3d
, NV50_BIND_CB(s
, i
));
253 nv50_context_get_sample_position(struct pipe_context
*, unsigned, unsigned,
256 struct pipe_context
*
257 nv50_create(struct pipe_screen
*pscreen
, void *priv
, unsigned ctxflags
)
259 struct nv50_screen
*screen
= nv50_screen(pscreen
);
260 struct nv50_context
*nv50
;
261 struct pipe_context
*pipe
;
265 nv50
= CALLOC_STRUCT(nv50_context
);
268 pipe
= &nv50
->base
.pipe
;
270 if (!nv50_blitctx_create(nv50
))
273 nv50
->base
.pushbuf
= screen
->base
.pushbuf
;
274 nv50
->base
.client
= screen
->base
.client
;
276 ret
= nouveau_bufctx_new(screen
->base
.client
, 2, &nv50
->bufctx
);
278 ret
= nouveau_bufctx_new(screen
->base
.client
, NV50_BIND_3D_COUNT
,
281 ret
= nouveau_bufctx_new(screen
->base
.client
, NV50_BIND_CP_COUNT
,
286 nv50
->base
.screen
= &screen
->base
;
287 nv50
->base
.copy_data
= nv50_m2mf_copy_linear
;
288 nv50
->base
.push_data
= nv50_sifc_linear_u8
;
289 /* FIXME: Make it possible to use this again. The problem is that there is
290 * some clever logic in the card that allows for multiple renders to happen
291 * when there are only constbuf changes. However that relies on the
292 * constbuf updates happening to the right constbuf slots. Currently
293 * implementation just makes it go through a separate slot which doesn't
294 * properly update the right constbuf data.
295 nv50->base.push_cb = nv50_cb_push;
298 nv50
->screen
= screen
;
299 pipe
->screen
= pscreen
;
302 pipe
->destroy
= nv50_destroy
;
304 pipe
->draw_vbo
= nv50_draw_vbo
;
305 pipe
->clear
= nv50_clear
;
306 pipe
->launch_grid
= nv50_launch_grid
;
308 pipe
->flush
= nv50_flush
;
309 pipe
->texture_barrier
= nv50_texture_barrier
;
310 pipe
->memory_barrier
= nv50_memory_barrier
;
311 pipe
->get_sample_position
= nv50_context_get_sample_position
;
313 if (!screen
->cur_ctx
) {
314 /* Restore the last context's state here, normally handled during
317 nv50
->state
= screen
->save_state
;
318 screen
->cur_ctx
= nv50
;
319 nouveau_pushbuf_bufctx(screen
->base
.pushbuf
, nv50
->bufctx
);
321 nv50
->base
.pushbuf
->kick_notify
= nv50_default_kick_notify
;
323 nouveau_context_init(&nv50
->base
);
324 nv50_init_query_functions(nv50
);
325 nv50_init_surface_functions(nv50
);
326 nv50_init_state_functions(nv50
);
327 nv50_init_resource_functions(pipe
);
329 nv50
->base
.invalidate_resource_storage
= nv50_invalidate_resource_storage
;
331 if (screen
->base
.device
->chipset
< 0x84 ||
332 debug_get_bool_option("NOUVEAU_PMPEG", false)) {
334 nouveau_context_init_vdec(&nv50
->base
);
335 } else if (screen
->base
.device
->chipset
< 0x98 ||
336 screen
->base
.device
->chipset
== 0xa0) {
338 pipe
->create_video_codec
= nv84_create_decoder
;
339 pipe
->create_video_buffer
= nv84_video_buffer_create
;
342 pipe
->create_video_codec
= nv98_create_decoder
;
343 pipe
->create_video_buffer
= nv98_video_buffer_create
;
346 flags
= NOUVEAU_BO_VRAM
| NOUVEAU_BO_RD
;
348 BCTX_REFN_bo(nv50
->bufctx_3d
, SCREEN
, flags
, screen
->code
);
349 BCTX_REFN_bo(nv50
->bufctx_3d
, SCREEN
, flags
, screen
->uniforms
);
350 BCTX_REFN_bo(nv50
->bufctx_3d
, SCREEN
, flags
, screen
->txc
);
351 BCTX_REFN_bo(nv50
->bufctx_3d
, SCREEN
, flags
, screen
->stack_bo
);
352 if (screen
->compute
) {
353 BCTX_REFN_bo(nv50
->bufctx_cp
, CP_SCREEN
, flags
, screen
->code
);
354 BCTX_REFN_bo(nv50
->bufctx_cp
, CP_SCREEN
, flags
, screen
->txc
);
355 BCTX_REFN_bo(nv50
->bufctx_cp
, CP_SCREEN
, flags
, screen
->stack_bo
);
358 flags
= NOUVEAU_BO_GART
| NOUVEAU_BO_WR
;
360 BCTX_REFN_bo(nv50
->bufctx_3d
, SCREEN
, flags
, screen
->fence
.bo
);
361 BCTX_REFN_bo(nv50
->bufctx
, FENCE
, flags
, screen
->fence
.bo
);
363 BCTX_REFN_bo(nv50
->bufctx_cp
, CP_SCREEN
, flags
, screen
->fence
.bo
);
365 nv50
->base
.scratch
.bo_size
= 2 << 20;
367 util_dynarray_init(&nv50
->global_residents
);
373 nouveau_bufctx_del(&nv50
->bufctx_3d
);
375 nouveau_bufctx_del(&nv50
->bufctx_cp
);
377 nouveau_bufctx_del(&nv50
->bufctx
);
384 nv50_bufctx_fence(struct nouveau_bufctx
*bufctx
, bool on_flush
)
386 struct nouveau_list
*list
= on_flush
? &bufctx
->current
: &bufctx
->pending
;
387 struct nouveau_list
*it
;
389 for (it
= list
->next
; it
!= list
; it
= it
->next
) {
390 struct nouveau_bufref
*ref
= (struct nouveau_bufref
*)it
;
391 struct nv04_resource
*res
= ref
->priv
;
393 nv50_resource_validate(res
, (unsigned)ref
->priv_data
);
/* Implements pipe_context::get_sample_position.
 *
 * Returns the sub-pixel position of @sample_index for the hardware's fixed
 * MSAA patterns. Positions are stored on a 16x16 grid (0x0..0xf) and scaled
 * by 1/16 into [0, 1) window coordinates written to xy[0]/xy[1].
 */
static void
nv50_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 }, /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 }, /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb }, /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 }, /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */
#if 0
   /* NOTE: there are alternative modes for MS2 and MS8, currently not used */
   static const uint8_t ms8_alt[8][2] = {
      { 0x9, 0x5 }, { 0x7, 0xb }, /* (2,0), (1,1) */
      { 0xd, 0x9 }, { 0x5, 0x3 }, /* (3,1), (1,0) */
      { 0x3, 0xd }, { 0x1, 0x7 }, /* (0,1), (0,0) */
      { 0xb, 0xf }, { 0xf, 0x1 } }; /* (2,1), (3,0) */
#endif

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 0:
   case 1: ptr = ms1; break;
   case 2: ptr = ms2; break;
   case 4: ptr = ms4; break;
   case 8: ptr = ms8; break;
   default:
      assert(0);
      return; /* bad sample count -> undefined locations */
   }
   xy[0] = ptr[sample_index][0] * 0.0625f;
   xy[1] = ptr[sample_index][1] * 0.0625f;
}