ilo: hardware contexts are only for the render ring
src/gallium/drivers/ilo/ilo_3d.c
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);

      q->data.u64 = 0;

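      /*
       * the bo holds depth counts in pairs: an even slot is written when
       * the query begins or resumes, the odd slot after it when the query
       * is paused or ends; q->reg_read is the next slot to write
       */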
      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);

      q->data.u64 = 0;

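      /* timestamps are written in begin/end pairs, like depth counts above */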
      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
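   /* primitive queries are maintained in software; see update_prim_count() */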
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

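/**
 * Fold the begin/end depth-count pairs written to the query bo into the
 * accumulated query result.
 */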
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   q->bo->unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

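/**
 * Convert a raw TIMESTAMP value to nanoseconds.  Only the low 32 bits are
 * used, and each tick is taken to be 80 ns, matching ilo_get_timestamp().
 */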
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   timestamp = vals[0];
   q->bo->unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

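/**
 * Fold the begin/end timestamp pairs written to the query bo into the
 * accumulated query result.
 */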
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   q->bo->unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   hw3d->new_batch = true;

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
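
   /*
    * a hardware context preserves the GPU state across batch buffers;
    * without one, the state is presumably lost and the whole pipeline
    * state has to be re-emitted
    */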
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

/**
 * Hook for CP pre-flush.
 */
void
ilo_3d_pre_cp_flush(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

/**
 * Hook for CP post-flush.
 */
void
ilo_3d_post_cp_flush(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);
   FREE(hw3d);
}

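/**
 * Emit the pipeline commands for a draw, flushing the batch buffer first
 * when there is not enough space left for the worst-case command length.
 */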
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         const struct pipe_draw_info *info,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush;
   int max_len;

   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   /*
    * Without a better tracking mechanism, when the framebuffer changes, we
    * have to assume that the old framebuffer may be sampled from.  If that
    * happens in the middle of a batch buffer, we need to insert manual
    * flushes.
    */
   need_flush = (!hw3d->new_batch && (ilo->dirty & ILO_DIRTY_FRAMEBUFFER));

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp);
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo, info,
         prim_generated, prim_emitted);
}

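/**
 * Add the primitive counts of the last draw to the active primitive
 * queries, which are maintained entirely in software.
 */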
static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

static bool
pass_render_condition(struct ilo_3d *hw3d, struct pipe_context *pipe)
{
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

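   /*
    * render when the query result is nonzero, or when it is unavailable
    * and we are not required to wait for it
    */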
   if (pipe->get_query_result(pipe, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result)) {
      return (result > 0);
   }
   else {
      return true;
   }
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

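/*
 * scan the index buffer and build one pipe_draw_info per run of indices
 * between restart indices, tracking the min/max index of each run
 */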
#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

static inline bool
ilo_check_restart_index(struct ilo_context *ilo,
                        const struct pipe_draw_info *info)
{
   /*
    * Haswell (GEN 7.5) supports an arbitrary cut index; everything older
    * only supports the all-ones index for each index size.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->index_buffer.index_size) {
   case 1:
      return ((info->restart_index & 0xff) == 0xff);
      break;
   case 2:
      return ((info->restart_index & 0xffff) == 0xffff);
      break;
   case 4:
      return (info->restart_index == 0xffffffff);
      break;
   }
   return false;
}

static inline bool
ilo_check_restart_prim_type(struct ilo_context *ilo,
                            const struct pipe_draw_info *info)
{
   switch (info->mode) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* all 965 GEN graphics support a cut index for these primitive types */
      return true;
      break;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Handle an indexed draw with primitive restart in software: break the
 * primitive into sub-primitives at each restart index and draw them one
 * by one.  Called only when the restart index or primitive type cannot be
 * handled by the HW.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  Worst case,
    * every other index could be a restart index, so we need space for that
    * many primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* if we cannot get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   struct pipe_transfer *transfer = NULL;
   const void *map = NULL;
   map = pipe_buffer_map(pipe,
         ilo->index_buffer.buffer,
         PIPE_TRANSFER_READ,
         &transfer);

   sub_prim_count = ilo_find_sub_primitives(map + ilo->index_buffer.offset,
         ilo->index_buffer.index_size,
         info,
         restart_info);

   pipe_buffer_unmap(pipe, transfer);

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (!pass_render_condition(hw3d, pipe))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to SW if
       * not.
       */
      if (!ilo_check_restart_index(ilo, info) ||
          !ilo_check_restart_prim_type(ilo, info)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   /* assume the cache is still in use by the previous batch */
   if (hw3d->new_batch)
      ilo_shader_cache_mark_busy(ilo->shader_cache);

   ilo_finalize_states(ilo);

   /* the shaders may be uploaded to a new shader cache */
   if (hw3d->shader_cache_seqno != ilo->shader_cache->seqno) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   /*
    * The VBs and/or IB may have different BOs due to being mapped with
    * PIPE_TRANSFER_DISCARD_x.  We should track that instead of setting the
    * dirty flags for performance reasons.
    */
   ilo->dirty |= ILO_DIRTY_VERTEX_BUFFERS | ILO_DIRTY_INDEX_BUFFER;

   /* return immediately if draw_vbo fails */
   if (!draw_vbo(hw3d, ilo, info, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;
   hw3d->shader_cache_seqno = ilo->shader_cache->seqno;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
}

static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp);
}

static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}