/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "pipe/p_defines.h"
24 #include "util/u_framebuffer.h"
25 #include "util/u_upload_mgr.h"
27 #include "nvc0/nvc0_context.h"
28 #include "nvc0/nvc0_screen.h"
29 #include "nvc0/nvc0_resource.h"
32 nvc0_flush(struct pipe_context
*pipe
,
33 struct pipe_fence_handle
**fence
,
36 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
37 struct nouveau_screen
*screen
= &nvc0
->screen
->base
;
40 nouveau_fence_ref(screen
->fence
.current
, (struct nouveau_fence
**)fence
);
42 PUSH_KICK(nvc0
->base
.pushbuf
); /* fencing handled in kick_notify */
44 nouveau_context_update_frame_stats(&nvc0
->base
);
48 nvc0_texture_barrier(struct pipe_context
*pipe
, unsigned flags
)
50 struct nouveau_pushbuf
*push
= nvc0_context(pipe
)->base
.pushbuf
;
52 IMMED_NVC0(push
, NVC0_3D(SERIALIZE
), 0);
53 IMMED_NVC0(push
, NVC0_3D(TEX_CACHE_CTL
), 0);
57 nvc0_memory_barrier(struct pipe_context
*pipe
, unsigned flags
)
59 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
60 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
63 if (!(flags
& ~PIPE_BARRIER_UPDATE
))
66 if (flags
& PIPE_BARRIER_MAPPED_BUFFER
) {
67 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
) {
68 if (!nvc0
->vtxbuf
[i
].buffer
.resource
&& !nvc0
->vtxbuf
[i
].is_user_buffer
)
70 if (nvc0
->vtxbuf
[i
].buffer
.resource
->flags
& PIPE_RESOURCE_FLAG_MAP_PERSISTENT
)
71 nvc0
->base
.vbo_dirty
= true;
74 for (s
= 0; s
< 5 && !nvc0
->cb_dirty
; ++s
) {
75 uint32_t valid
= nvc0
->constbuf_valid
[s
];
77 while (valid
&& !nvc0
->cb_dirty
) {
78 const unsigned i
= ffs(valid
) - 1;
79 struct pipe_resource
*res
;
82 if (nvc0
->constbuf
[s
][i
].user
)
85 res
= nvc0
->constbuf
[s
][i
].u
.buf
;
89 if (res
->flags
& PIPE_RESOURCE_FLAG_MAP_PERSISTENT
)
90 nvc0
->cb_dirty
= true;
94 /* Pretty much any writing by shaders needs a serialize after
95 * it. Especially when moving between 3d and compute pipelines, but even
98 IMMED_NVC0(push
, NVC0_3D(SERIALIZE
), 0);
101 /* If we're going to texture from a buffer/image written by a shader, we
102 * must flush the texture cache.
104 if (flags
& PIPE_BARRIER_TEXTURE
)
105 IMMED_NVC0(push
, NVC0_3D(TEX_CACHE_CTL
), 0);
107 if (flags
& PIPE_BARRIER_CONSTANT_BUFFER
)
108 nvc0
->cb_dirty
= true;
109 if (flags
& (PIPE_BARRIER_VERTEX_BUFFER
| PIPE_BARRIER_INDEX_BUFFER
))
110 nvc0
->base
.vbo_dirty
= true;
114 nvc0_emit_string_marker(struct pipe_context
*pipe
, const char *str
, int len
)
116 struct nouveau_pushbuf
*push
= nvc0_context(pipe
)->base
.pushbuf
;
117 int string_words
= len
/ 4;
122 string_words
= MIN2(string_words
, NV04_PFIFO_MAX_PACKET_LEN
);
123 if (string_words
== NV04_PFIFO_MAX_PACKET_LEN
)
124 data_words
= string_words
;
126 data_words
= string_words
+ !!(len
& 3);
127 BEGIN_NIC0(push
, SUBC_3D(NV04_GRAPH_NOP
), data_words
);
129 PUSH_DATAp(push
, str
, string_words
);
130 if (string_words
!= data_words
) {
132 memcpy(&data
, &str
[string_words
* 4], len
& 3);
133 PUSH_DATA (push
, data
);
137 static enum pipe_reset_status
138 nvc0_get_device_reset_status(struct pipe_context
*pipe
)
140 return PIPE_NO_RESET
;
144 nvc0_context_unreference_resources(struct nvc0_context
*nvc0
)
148 nouveau_bufctx_del(&nvc0
->bufctx_3d
);
149 nouveau_bufctx_del(&nvc0
->bufctx
);
150 nouveau_bufctx_del(&nvc0
->bufctx_cp
);
152 util_unreference_framebuffer_state(&nvc0
->framebuffer
);
154 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
)
155 pipe_vertex_buffer_unreference(&nvc0
->vtxbuf
[i
]);
157 for (s
= 0; s
< 6; ++s
) {
158 for (i
= 0; i
< nvc0
->num_textures
[s
]; ++i
)
159 pipe_sampler_view_reference(&nvc0
->textures
[s
][i
], NULL
);
161 for (i
= 0; i
< NVC0_MAX_PIPE_CONSTBUFS
; ++i
)
162 if (!nvc0
->constbuf
[s
][i
].user
)
163 pipe_resource_reference(&nvc0
->constbuf
[s
][i
].u
.buf
, NULL
);
165 for (i
= 0; i
< NVC0_MAX_BUFFERS
; ++i
)
166 pipe_resource_reference(&nvc0
->buffers
[s
][i
].buffer
, NULL
);
168 for (i
= 0; i
< NVC0_MAX_IMAGES
; ++i
) {
169 pipe_resource_reference(&nvc0
->images
[s
][i
].resource
, NULL
);
170 if (nvc0
->screen
->base
.class_3d
>= GM107_3D_CLASS
)
171 pipe_sampler_view_reference(&nvc0
->images_tic
[s
][i
], NULL
);
175 for (s
= 0; s
< 2; ++s
) {
176 for (i
= 0; i
< NVC0_MAX_SURFACE_SLOTS
; ++i
)
177 pipe_surface_reference(&nvc0
->surfaces
[s
][i
], NULL
);
180 for (i
= 0; i
< nvc0
->num_tfbbufs
; ++i
)
181 pipe_so_target_reference(&nvc0
->tfbbuf
[i
], NULL
);
183 for (i
= 0; i
< nvc0
->global_residents
.size
/ sizeof(struct pipe_resource
*);
185 struct pipe_resource
**res
= util_dynarray_element(
186 &nvc0
->global_residents
, struct pipe_resource
*, i
);
187 pipe_resource_reference(res
, NULL
);
189 util_dynarray_fini(&nvc0
->global_residents
);
192 nvc0
->base
.pipe
.delete_tcs_state(&nvc0
->base
.pipe
, nvc0
->tcp_empty
);
196 nvc0_destroy(struct pipe_context
*pipe
)
198 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
200 if (nvc0
->screen
->cur_ctx
== nvc0
) {
201 nvc0
->screen
->cur_ctx
= NULL
;
202 nvc0
->screen
->save_state
= nvc0
->state
;
203 nvc0
->screen
->save_state
.tfb
= NULL
;
206 if (nvc0
->base
.pipe
.stream_uploader
)
207 u_upload_destroy(nvc0
->base
.pipe
.stream_uploader
);
209 /* Unset bufctx, we don't want to revalidate any resources after the flush.
210 * Other contexts will always set their bufctx again on action calls.
212 nouveau_pushbuf_bufctx(nvc0
->base
.pushbuf
, NULL
);
213 nouveau_pushbuf_kick(nvc0
->base
.pushbuf
, nvc0
->base
.pushbuf
->channel
);
215 nvc0_context_unreference_resources(nvc0
);
216 nvc0_blitctx_destroy(nvc0
);
218 list_for_each_entry_safe(struct nvc0_resident
, pos
, &nvc0
->tex_head
, list
) {
219 list_del(&pos
->list
);
223 list_for_each_entry_safe(struct nvc0_resident
, pos
, &nvc0
->img_head
, list
) {
224 list_del(&pos
->list
);
228 nouveau_context_destroy(&nvc0
->base
);
232 nvc0_default_kick_notify(struct nouveau_pushbuf
*push
)
234 struct nvc0_screen
*screen
= push
->user_priv
;
237 nouveau_fence_next(&screen
->base
);
238 nouveau_fence_update(&screen
->base
, true);
240 screen
->cur_ctx
->state
.flushed
= true;
241 NOUVEAU_DRV_STAT(&screen
->base
, pushbuf_count
, 1);
246 nvc0_invalidate_resource_storage(struct nouveau_context
*ctx
,
247 struct pipe_resource
*res
,
250 struct nvc0_context
*nvc0
= nvc0_context(&ctx
->pipe
);
253 if (res
->bind
& PIPE_BIND_RENDER_TARGET
) {
254 for (i
= 0; i
< nvc0
->framebuffer
.nr_cbufs
; ++i
) {
255 if (nvc0
->framebuffer
.cbufs
[i
] &&
256 nvc0
->framebuffer
.cbufs
[i
]->texture
== res
) {
257 nvc0
->dirty_3d
|= NVC0_NEW_3D_FRAMEBUFFER
;
258 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_FB
);
264 if (res
->bind
& PIPE_BIND_DEPTH_STENCIL
) {
265 if (nvc0
->framebuffer
.zsbuf
&&
266 nvc0
->framebuffer
.zsbuf
->texture
== res
) {
267 nvc0
->dirty_3d
|= NVC0_NEW_3D_FRAMEBUFFER
;
268 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_FB
);
274 if (res
->target
== PIPE_BUFFER
) {
275 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
) {
276 if (nvc0
->vtxbuf
[i
].buffer
.resource
== res
) {
277 nvc0
->dirty_3d
|= NVC0_NEW_3D_ARRAYS
;
278 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_VTX
);
284 for (s
= 0; s
< 6; ++s
) {
285 for (i
= 0; i
< nvc0
->num_textures
[s
]; ++i
) {
286 if (nvc0
->textures
[s
][i
] &&
287 nvc0
->textures
[s
][i
]->texture
== res
) {
288 nvc0
->textures_dirty
[s
] |= 1 << i
;
289 if (unlikely(s
== 5)) {
290 nvc0
->dirty_cp
|= NVC0_NEW_CP_TEXTURES
;
291 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_TEX(i
));
293 nvc0
->dirty_3d
|= NVC0_NEW_3D_TEXTURES
;
294 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_TEX(s
, i
));
302 for (s
= 0; s
< 6; ++s
) {
303 for (i
= 0; i
< NVC0_MAX_PIPE_CONSTBUFS
; ++i
) {
304 if (!(nvc0
->constbuf_valid
[s
] & (1 << i
)))
306 if (!nvc0
->constbuf
[s
][i
].user
&&
307 nvc0
->constbuf
[s
][i
].u
.buf
== res
) {
308 nvc0
->constbuf_dirty
[s
] |= 1 << i
;
309 if (unlikely(s
== 5)) {
310 nvc0
->dirty_cp
|= NVC0_NEW_CP_CONSTBUF
;
311 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_CB(i
));
313 nvc0
->dirty_3d
|= NVC0_NEW_3D_CONSTBUF
;
314 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_CB(s
, i
));
322 for (s
= 0; s
< 6; ++s
) {
323 for (i
= 0; i
< NVC0_MAX_BUFFERS
; ++i
) {
324 if (nvc0
->buffers
[s
][i
].buffer
== res
) {
325 nvc0
->buffers_dirty
[s
] |= 1 << i
;
326 if (unlikely(s
== 5)) {
327 nvc0
->dirty_cp
|= NVC0_NEW_CP_BUFFERS
;
328 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_BUF
);
330 nvc0
->dirty_3d
|= NVC0_NEW_3D_BUFFERS
;
331 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_BUF
);
339 for (s
= 0; s
< 6; ++s
) {
340 for (i
= 0; i
< NVC0_MAX_IMAGES
; ++i
) {
341 if (nvc0
->images
[s
][i
].resource
== res
) {
342 nvc0
->images_dirty
[s
] |= 1 << i
;
343 if (unlikely(s
== 5)) {
344 nvc0
->dirty_cp
|= NVC0_NEW_CP_SURFACES
;
345 nouveau_bufctx_reset(nvc0
->bufctx_cp
, NVC0_BIND_CP_SUF
);
347 nvc0
->dirty_3d
|= NVC0_NEW_3D_SURFACES
;
348 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_SUF
);
361 nvc0_context_get_sample_position(struct pipe_context
*, unsigned, unsigned,
364 struct pipe_context
*
365 nvc0_create(struct pipe_screen
*pscreen
, void *priv
, unsigned ctxflags
)
367 struct nvc0_screen
*screen
= nvc0_screen(pscreen
);
368 struct nvc0_context
*nvc0
;
369 struct pipe_context
*pipe
;
373 nvc0
= CALLOC_STRUCT(nvc0_context
);
376 pipe
= &nvc0
->base
.pipe
;
378 if (!nvc0_blitctx_create(nvc0
))
381 nvc0
->base
.pushbuf
= screen
->base
.pushbuf
;
382 nvc0
->base
.client
= screen
->base
.client
;
384 ret
= nouveau_bufctx_new(screen
->base
.client
, 2, &nvc0
->bufctx
);
386 ret
= nouveau_bufctx_new(screen
->base
.client
, NVC0_BIND_3D_COUNT
,
389 ret
= nouveau_bufctx_new(screen
->base
.client
, NVC0_BIND_CP_COUNT
,
394 nvc0
->screen
= screen
;
395 nvc0
->base
.screen
= &screen
->base
;
397 pipe
->screen
= pscreen
;
399 pipe
->stream_uploader
= u_upload_create_default(pipe
);
400 if (!pipe
->stream_uploader
)
402 pipe
->const_uploader
= pipe
->stream_uploader
;
404 pipe
->destroy
= nvc0_destroy
;
406 pipe
->draw_vbo
= nvc0_draw_vbo
;
407 pipe
->clear
= nvc0_clear
;
408 pipe
->launch_grid
= (nvc0
->screen
->base
.class_3d
>= NVE4_3D_CLASS
) ?
409 nve4_launch_grid
: nvc0_launch_grid
;
411 pipe
->flush
= nvc0_flush
;
412 pipe
->texture_barrier
= nvc0_texture_barrier
;
413 pipe
->memory_barrier
= nvc0_memory_barrier
;
414 pipe
->get_sample_position
= nvc0_context_get_sample_position
;
415 pipe
->emit_string_marker
= nvc0_emit_string_marker
;
416 pipe
->get_device_reset_status
= nvc0_get_device_reset_status
;
418 nouveau_context_init(&nvc0
->base
);
419 nvc0_init_query_functions(nvc0
);
420 nvc0_init_surface_functions(nvc0
);
421 nvc0_init_state_functions(nvc0
);
422 nvc0_init_transfer_functions(nvc0
);
423 nvc0_init_resource_functions(pipe
);
424 if (nvc0
->screen
->base
.class_3d
>= NVE4_3D_CLASS
)
425 nvc0_init_bindless_functions(pipe
);
427 list_inithead(&nvc0
->tex_head
);
428 list_inithead(&nvc0
->img_head
);
430 nvc0
->base
.invalidate_resource_storage
= nvc0_invalidate_resource_storage
;
432 pipe
->create_video_codec
= nvc0_create_decoder
;
433 pipe
->create_video_buffer
= nvc0_video_buffer_create
;
435 /* shader builtin library is per-screen, but we need a context for m2mf */
436 nvc0_program_library_upload(nvc0
);
437 nvc0_program_init_tcp_empty(nvc0
);
438 if (!nvc0
->tcp_empty
)
440 /* set the empty tctl prog on next draw in case one is never set */
441 nvc0
->dirty_3d
|= NVC0_NEW_3D_TCTLPROG
;
443 /* Do not bind the COMPUTE driver constbuf at screen initialization because
444 * CBs are aliased between 3D and COMPUTE, but make sure it will be bound if
445 * a grid is launched later. */
446 nvc0
->dirty_cp
|= NVC0_NEW_CP_DRIVERCONST
;
448 /* now that there are no more opportunities for errors, set the current
449 * context if there isn't already one.
451 if (!screen
->cur_ctx
) {
452 nvc0
->state
= screen
->save_state
;
453 screen
->cur_ctx
= nvc0
;
454 nouveau_pushbuf_bufctx(screen
->base
.pushbuf
, nvc0
->bufctx
);
456 screen
->base
.pushbuf
->kick_notify
= nvc0_default_kick_notify
;
458 /* add permanently resident buffers to bufctxts */
460 flags
= NV_VRAM_DOMAIN(&screen
->base
) | NOUVEAU_BO_RD
;
462 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_SCREEN
, flags
, screen
->uniform_bo
);
463 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_SCREEN
, flags
, screen
->txc
);
464 if (screen
->compute
) {
465 BCTX_REFN_bo(nvc0
->bufctx_cp
, CP_SCREEN
, flags
, screen
->uniform_bo
);
466 BCTX_REFN_bo(nvc0
->bufctx_cp
, CP_SCREEN
, flags
, screen
->txc
);
469 flags
= NV_VRAM_DOMAIN(&screen
->base
) | NOUVEAU_BO_RDWR
;
471 if (screen
->poly_cache
)
472 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_SCREEN
, flags
, screen
->poly_cache
);
474 BCTX_REFN_bo(nvc0
->bufctx_cp
, CP_SCREEN
, flags
, screen
->tls
);
476 flags
= NOUVEAU_BO_GART
| NOUVEAU_BO_WR
;
478 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_SCREEN
, flags
, screen
->fence
.bo
);
479 BCTX_REFN_bo(nvc0
->bufctx
, FENCE
, flags
, screen
->fence
.bo
);
481 BCTX_REFN_bo(nvc0
->bufctx_cp
, CP_SCREEN
, flags
, screen
->fence
.bo
);
483 nvc0
->base
.scratch
.bo_size
= 2 << 20;
485 memset(nvc0
->tex_handles
, ~0, sizeof(nvc0
->tex_handles
));
487 util_dynarray_init(&nvc0
->global_residents
, NULL
);
489 // Make sure that the first TSC entry has SRGB conversion bit set, since we
490 // use it as a fallback on Fermi for TXF, and on Kepler+ generations for
491 // FBFETCH handling (which also uses TXF).
493 // NOTE: Preliminary testing suggests that this isn't necessary at all at
494 // least on GM20x (untested on Kepler). However this is ~free, so no reason
496 if (!screen
->tsc
.entries
[0])
497 nvc0_upload_tsc0(nvc0
);
499 // On Fermi, mark samplers dirty so that the proper binding can happen
500 if (screen
->base
.class_3d
< NVE4_3D_CLASS
) {
501 for (int s
= 0; s
< 6; s
++)
502 nvc0
->samplers_dirty
[s
] = 1;
503 nvc0
->dirty_3d
|= NVC0_NEW_3D_SAMPLERS
;
504 nvc0
->dirty_cp
|= NVC0_NEW_CP_SAMPLERS
;
511 if (pipe
->stream_uploader
)
512 u_upload_destroy(pipe
->stream_uploader
);
514 nouveau_bufctx_del(&nvc0
->bufctx_3d
);
516 nouveau_bufctx_del(&nvc0
->bufctx_cp
);
518 nouveau_bufctx_del(&nvc0
->bufctx
);
526 nvc0_bufctx_fence(struct nvc0_context
*nvc0
, struct nouveau_bufctx
*bufctx
,
529 struct nouveau_list
*list
= on_flush
? &bufctx
->current
: &bufctx
->pending
;
530 struct nouveau_list
*it
;
531 NOUVEAU_DRV_STAT_IFD(unsigned count
= 0);
533 for (it
= list
->next
; it
!= list
; it
= it
->next
) {
534 struct nouveau_bufref
*ref
= (struct nouveau_bufref
*)it
;
535 struct nv04_resource
*res
= ref
->priv
;
537 nvc0_resource_validate(res
, (unsigned)ref
->priv_data
);
538 NOUVEAU_DRV_STAT_IFD(count
++);
540 NOUVEAU_DRV_STAT(&nvc0
->screen
->base
, resource_validate_count
, count
);
/* Return the standard MSAA sample-location table for the given sample count
 * as a flat array of packed x/y pairs, in units of 1/16th of a pixel.
 * Returns NULL for unsupported sample counts (asserts in debug builds).
 */
uint8_t *
nvc0_get_sample_locations(unsigned sample_count)
{
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },   /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 },   /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb },   /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 },   /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */

   /* NOTE: there are alternative modes for MS2 and MS8, currently not used */
   static const uint8_t ms8_alt[8][2] = {
      { 0x9, 0x5 }, { 0x7, 0xb },   /* (2,0), (1,1) */
      { 0xd, 0x9 }, { 0x5, 0x3 },   /* (3,1), (1,0) */
      { 0x3, 0xd }, { 0x1, 0x7 },   /* (0,1), (0,0) */
      { 0xb, 0xf }, { 0xf, 0x1 } }; /* (2,1), (3,0) */
   (void)ms8_alt; /* kept for reference; silence unused-variable warnings */

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 0:
   case 1: ptr = ms1; break;
   case 2: ptr = ms2; break;
   case 4: ptr = ms4; break;
   case 8: ptr = ms8; break;
   default:
      assert(0);
      return NULL; /* bad sample count -> undefined locations */
   }
   return (uint8_t *)ptr;
}
/* pipe_context::get_sample_position: write the given sample's position
 * within the pixel into xy[0..1], scaling the 4-bit table entries (16ths
 * of a pixel) into [0, 1) floats. Does nothing for bad sample counts.
 */
static void
nvc0_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   const uint8_t (*ptr)[2];

   ptr = (const uint8_t (*)[2])nvc0_get_sample_locations(sample_count);
   if (!ptr)
      return;

   xy[0] = ptr[sample_index][0] * 0.0625f;
   xy[1] = ptr[sample_index][1] * 0.0625f;
}