nvc0/cl: handle 64-bit pointers in nvc0_set_global_handle
[mesa.git] src/gallium/drivers/nouveau/nvc0/nvc0_context.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_upload_mgr.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_screen.h"
#include "nvc0/nvc0_resource.h"

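/* pipe_context::flush: hand out a reference to the current fence if the
 * caller asked for one, then kick the pushbuf to submit the queued commands
 * (fence bookkeeping itself happens in the kick_notify callback). */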
static void
nvc0_flush(struct pipe_context *pipe,
           struct pipe_fence_handle **fence,
           unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_screen *screen = &nvc0->screen->base;

   if (fence)
      nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);

   PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */

   nouveau_context_update_frame_stats(&nvc0->base);
}

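/* pipe_context::texture_barrier: serialize the 3D pipeline and flush the
 * texture cache so that subsequent sampling sees prior rendering. */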
static void
nvc0_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;

   IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
}

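/* pipe_context::memory_barrier: for mapped-buffer barriers, mark bound
 * vertex/constant buffers dirty when they are persistently mapped; for
 * shader-write visibility, emit SERIALIZE and flush the relevant caches. */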
static void
nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i, s;

   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)
            continue;
         if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
            nvc0->base.vbo_dirty = true;
      }

      for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
         uint32_t valid = nvc0->constbuf_valid[s];

         while (valid && !nvc0->cb_dirty) {
            const unsigned i = ffs(valid) - 1;
            struct pipe_resource *res;

            valid &= ~(1 << i);
            if (nvc0->constbuf[s][i].user)
               continue;

            res = nvc0->constbuf[s][i].u.buf;
            if (!res)
               continue;

            if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
               nvc0->cb_dirty = true;
         }
      }
   } else {
      /* Pretty much any writing by shaders needs a serialize after
       * it. Especially when moving between 3d and compute pipelines, but even
       * without that.
       */
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   }

   /* If we're going to texture from a buffer/image written by a shader, we
    * must flush the texture cache.
    */
   if (flags & PIPE_BARRIER_TEXTURE)
      IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
      nvc0->cb_dirty = true;
   if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
      nvc0->base.vbo_dirty = true;
}

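/* pipe_context::emit_string_marker: embed the debug string into the command
 * stream as NOP method data, truncated to one NV04_PFIFO_MAX_PACKET_LEN
 * packet. */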
static void
nvc0_emit_string_marker(struct pipe_context *pipe, const char *str, int len)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;
   int string_words = len / 4;
   int data_words;

   if (len <= 0)
      return;
   string_words = MIN2(string_words, NV04_PFIFO_MAX_PACKET_LEN);
   if (string_words == NV04_PFIFO_MAX_PACKET_LEN)
      data_words = string_words;
   else
      data_words = string_words + !!(len & 3);
   BEGIN_NIC0(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
   if (string_words)
      PUSH_DATAp(push, str, string_words);
   if (string_words != data_words) {
      int data = 0;
      memcpy(&data, &str[string_words * 4], len & 3);
      PUSH_DATA (push, data);
   }
}

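/* GPU resets are not tracked here, so always report that none occurred. */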
static enum pipe_reset_status
nvc0_get_device_reset_status(struct pipe_context *pipe)
{
   return PIPE_NO_RESET;
}

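/* Release every reference the context still holds (framebuffer, vertex
 * buffers, textures, constbufs, buffers, images, surfaces, TFB targets and
 * global compute residents); called from nvc0_destroy(). */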
static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
   unsigned s, i;

   nouveau_bufctx_del(&nvc0->bufctx_3d);
   nouveau_bufctx_del(&nvc0->bufctx);
   nouveau_bufctx_del(&nvc0->bufctx_cp);

   util_unreference_framebuffer_state(&nvc0->framebuffer);

   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);

   for (s = 0; s < 6; ++s) {
      for (i = 0; i < nvc0->num_textures[s]; ++i)
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);

      for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i)
         if (!nvc0->constbuf[s][i].user)
            pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);

      for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
         pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);

      for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
         pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
            pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
      }
   }

   for (s = 0; s < 2; ++s) {
      for (i = 0; i < NVC0_MAX_SURFACE_SLOTS; ++i)
         pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
   }

   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
        ++i) {
      struct pipe_resource **res = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, i);
      pipe_resource_reference(res, NULL);
   }
   util_dynarray_fini(&nvc0->global_residents);

   if (nvc0->tcp_empty)
      nvc0->base.pipe.delete_tcs_state(&nvc0->base.pipe, nvc0->tcp_empty);
}

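/* pipe_context::destroy: stash the current state in the screen so the next
 * context can inherit it, flush pending work with the bufctx detached, then
 * drop all references and tear the context down. */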
static void
nvc0_destroy(struct pipe_context *pipe)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   if (nvc0->screen->cur_ctx == nvc0) {
      nvc0->screen->cur_ctx = NULL;
      nvc0->screen->save_state = nvc0->state;
      nvc0->screen->save_state.tfb = NULL;
   }

   if (nvc0->base.pipe.stream_uploader)
      u_upload_destroy(nvc0->base.pipe.stream_uploader);

   /* Unset bufctx, we don't want to revalidate any resources after the flush.
    * Other contexts will always set their bufctx again on action calls.
    */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, NULL);
   nouveau_pushbuf_kick(nvc0->base.pushbuf, nvc0->base.pushbuf->channel);

   nvc0_context_unreference_resources(nvc0);
   nvc0_blitctx_destroy(nvc0);

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->tex_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->img_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   nouveau_context_destroy(&nvc0->base);
}

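/* Pushbuf kick callback: rotate to a new fence, update fence completion
 * state, and remember that the current context's state reached the HW. */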
void
nvc0_default_kick_notify(struct nouveau_pushbuf *push)
{
   struct nvc0_screen *screen = push->user_priv;

   if (screen) {
      nouveau_fence_next(&screen->base);
      nouveau_fence_update(&screen->base, true);
      if (screen->cur_ctx)
         screen->cur_ctx->state.flushed = true;
      NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
   }
}

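/* Notification that a resource's backing storage changed: mark every binding
 * that references it dirty so it gets revalidated. `ref` counts how many
 * references may remain; scanning stops early once it reaches zero. */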
static int
nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nvc0_context *nvc0 = nvc0_context(&ctx->pipe);
   unsigned s, i;

   if (res->bind & PIPE_BIND_RENDER_TARGET) {
      for (i = 0; i < nvc0->framebuffer.nr_cbufs; ++i) {
         if (nvc0->framebuffer.cbufs[i] &&
             nvc0->framebuffer.cbufs[i]->texture == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (res->bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nvc0->framebuffer.zsbuf &&
          nvc0->framebuffer.zsbuf->texture == res) {
         nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (res->target == PIPE_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (nvc0->vtxbuf[i].buffer.resource == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
            if (!--ref)
               return ref;
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < nvc0->num_textures[s]; ++i) {
            if (nvc0->textures[s][i] &&
                nvc0->textures[s][i]->texture == res) {
               nvc0->textures_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_TEX(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TEX(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i) {
            if (!(nvc0->constbuf_valid[s] & (1 << i)))
               continue;
            if (!nvc0->constbuf[s][i].user &&
                nvc0->constbuf[s][i].u.buf == res) {
               nvc0->constbuf_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_CB(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_CB(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_BUFFERS; ++i) {
            if (nvc0->buffers[s][i].buffer == res) {
               nvc0->buffers_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_BUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_BUF);
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
            if (nvc0->images[s][i].resource == res) {
               nvc0->images_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
               }
               /* only count this slot if it actually referenced the
                * resource, matching the loops above */
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}

static void
nvc0_context_get_sample_position(struct pipe_context *, unsigned, unsigned,
                                 float *);

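/* Create the nvc0 pipe_context: set up bufctxs, wire up the context entry
 * points, and pin the screen's permanently resident buffers. */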
struct pipe_context *
nvc0_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_context *nvc0;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nvc0 = CALLOC_STRUCT(nvc0_context);
   if (!nvc0)
      return NULL;
   pipe = &nvc0->base.pipe;

   if (!nvc0_blitctx_create(nvc0))
      goto out_err;

   nvc0->base.pushbuf = screen->base.pushbuf;
   nvc0->base.client = screen->base.client;

   ret = nouveau_bufctx_new(screen->base.client, 2, &nvc0->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_3D_COUNT,
                               &nvc0->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_CP_COUNT,
                               &nvc0->bufctx_cp);
   if (ret)
      goto out_err;

   nvc0->screen = screen;
   nvc0->base.screen = &screen->base;

   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nvc0_destroy;

   pipe->draw_vbo = nvc0_draw_vbo;
   pipe->clear = nvc0_clear;
   pipe->launch_grid = (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) ?
      nve4_launch_grid : nvc0_launch_grid;

   pipe->flush = nvc0_flush;
   pipe->texture_barrier = nvc0_texture_barrier;
   pipe->memory_barrier = nvc0_memory_barrier;
   pipe->get_sample_position = nvc0_context_get_sample_position;
   pipe->emit_string_marker = nvc0_emit_string_marker;
   pipe->get_device_reset_status = nvc0_get_device_reset_status;

   nouveau_context_init(&nvc0->base);
   nvc0_init_query_functions(nvc0);
   nvc0_init_surface_functions(nvc0);
   nvc0_init_state_functions(nvc0);
   nvc0_init_transfer_functions(nvc0);
   nvc0_init_resource_functions(pipe);
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
      nvc0_init_bindless_functions(pipe);

   list_inithead(&nvc0->tex_head);
   list_inithead(&nvc0->img_head);

   nvc0->base.invalidate_resource_storage = nvc0_invalidate_resource_storage;

   pipe->create_video_codec = nvc0_create_decoder;
   pipe->create_video_buffer = nvc0_video_buffer_create;

   /* shader builtin library is per-screen, but we need a context for m2mf */
   nvc0_program_library_upload(nvc0);
   nvc0_program_init_tcp_empty(nvc0);
   if (!nvc0->tcp_empty)
      goto out_err;
   /* set the empty tctl prog on next draw in case one is never set */
   nvc0->dirty_3d |= NVC0_NEW_3D_TCTLPROG;

   /* Do not bind the COMPUTE driver constbuf at screen initialization because
    * CBs are aliased between 3D and COMPUTE, but make sure it will be bound if
    * a grid is launched later. */
   nvc0->dirty_cp |= NVC0_NEW_CP_DRIVERCONST;

   /* now that there are no more opportunities for errors, set the current
    * context if there isn't already one.
    */
   if (!screen->cur_ctx) {
      nvc0->state = screen->save_state;
      screen->cur_ctx = nvc0;
      nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
   }
   screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;

   /* add permanently resident buffers to bufctxs */

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->uniform_bo);
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   if (screen->compute) {
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->uniform_bo);
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->txc);
   }

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR;

   if (screen->poly_cache)
      BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->poly_cache);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->tls);

   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

   nvc0->base.scratch.bo_size = 2 << 20;

   memset(nvc0->tex_handles, ~0, sizeof(nvc0->tex_handles));

   util_dynarray_init(&nvc0->global_residents, NULL);

   // Make sure that the first TSC entry has SRGB conversion bit set, since we
   // use it as a fallback on Fermi for TXF, and on Kepler+ generations for
   // FBFETCH handling (which also uses TXF).
   //
   // NOTE: Preliminary testing suggests that this isn't necessary at all at
   // least on GM20x (untested on Kepler). However this is ~free, so no reason
   // not to do it.
   if (!screen->tsc.entries[0])
      nvc0_upload_tsc0(nvc0);

   // On Fermi, mark samplers dirty so that the proper binding can happen
   if (screen->base.class_3d < NVE4_3D_CLASS) {
      for (int s = 0; s < 6; s++)
         nvc0->samplers_dirty[s] = 1;
      nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLERS;
      nvc0->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
   }

   return pipe;

out_err:
   if (nvc0) {
      if (pipe->stream_uploader)
         u_upload_destroy(pipe->stream_uploader);
      if (nvc0->bufctx_3d)
         nouveau_bufctx_del(&nvc0->bufctx_3d);
      if (nvc0->bufctx_cp)
         nouveau_bufctx_del(&nvc0->bufctx_cp);
      if (nvc0->bufctx)
         nouveau_bufctx_del(&nvc0->bufctx);
      FREE(nvc0->blit);
      FREE(nvc0);
   }
   return NULL;
}

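/* Validate every nv04_resource referenced by the given bufctx (the access
 * flags are stashed in priv_data), presumably tying each buffer's lifetime
 * to the current fence so it stays valid until the GPU is done with it. */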
void
nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx,
                  bool on_flush)
{
   struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
   struct nouveau_list *it;
   NOUVEAU_DRV_STAT_IFD(unsigned count = 0);

   for (it = list->next; it != list; it = it->next) {
      struct nouveau_bufref *ref = (struct nouveau_bufref *)it;
      struct nv04_resource *res = ref->priv;
      if (res)
         nvc0_resource_validate(res, (unsigned)ref->priv_data);
      NOUVEAU_DRV_STAT_IFD(count++);
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, resource_validate_count, count);
}

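/* Sample locations in 1/16th-of-a-pixel subpixel coordinates, as (x, y)
 * pairs per sample count; these presumably match the MS patterns the
 * rasterizer is programmed with elsewhere in the driver. */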
const void *
nvc0_get_sample_locations(unsigned sample_count)
{
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },   /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 },   /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb },   /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 },   /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */
#if 0
   /* NOTE: there are alternative modes for MS2 and MS8, currently not used */
   static const uint8_t ms8_alt[8][2] = {
      { 0x9, 0x5 }, { 0x7, 0xb },   /* (2,0), (1,1) */
      { 0xd, 0x9 }, { 0x5, 0x3 },   /* (3,1), (1,0) */
      { 0x3, 0xd }, { 0x1, 0x7 },   /* (0,1), (0,0) */
      { 0xb, 0xf }, { 0xf, 0x1 } }; /* (2,1), (3,0) */
#endif

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 0:
   case 1: ptr = ms1; break;
   case 2: ptr = ms2; break;
   case 4: ptr = ms4; break;
   case 8: ptr = ms8; break;
   default:
      assert(0);
      return NULL; /* bad sample count -> undefined locations */
   }
   return ptr;
}

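/* pipe_context::get_sample_position: scale the 4-bit fixed-point locations
 * above into floats in [0, 1) (one step = 0.0625 pixel). */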
static void
nvc0_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   const uint8_t (*ptr)[2];

   ptr = nvc0_get_sample_locations(sample_count);
   if (!ptr)
      return;

   xy[0] = ptr[sample_index][0] * 0.0625f;
   xy[1] = ptr[sample_index][1] * 0.0625f;
}