/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
#include "util/u_prim.h"
#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_blit.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"
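/*
 * An occlusion query accumulates results in pairs of slots: each begin or
 * resume writes the current depth count into an even slot of q->bo, and each
 * pause or end writes into the following odd slot.  The query result is the
 * sum of the differences, e.g. with two active intervals
 *
 *    result = (vals[1] - vals[0]) + (vals[3] - vals[2])
 *
 * which is what the loop below computes.
 */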
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* the slots are written in begin/end pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}
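/*
 * Convert a raw TIMESTAMP register value to nanoseconds.  Only the lower 32
 * bits of the counter are used and each tick is treated as 80 ns; see
 * ilo_get_timestamp() for the matching convention.
 */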
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}
static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   timestamp = vals[0];
   intel_bo_unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* the slots are written in begin/end pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}
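/*
 * Queries are paused whenever the context loses the render ring and resumed
 * when it is reacquired.  Each pause/resume writes one more slot of q->bo;
 * once the bo fills up (reg_read reaches reg_total), the partial result is
 * folded into q->data.u64 by the process_query_for_*() helpers above and the
 * slots are reused from the beginning.
 */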
void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}
void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}
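/*
 * Claim the render ring for this context.  Taking ownership invokes the
 * previous owner's release callback (ilo_3d_release_render_ring() above,
 * which pauses its queries) and, when ownership actually changes hands,
 * resumes our own queries.  owner_reserve keeps enough space in the batch
 * buffer for the register writes needed to pause the active queries later.
 */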
void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}
/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nothing to emit here; the timestamp is written at end-query time */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}
/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}
/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);

   if (hw3d->kernel.bo)
      intel_bo_unreference(hw3d->kernel.bo);

   FREE(hw3d);
}
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes,
       * we have to assume that the old framebuffer may be sampled from.  If
       * that happens in the middle of a batch buffer, we need to insert a
       * manual flush.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}
static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}
bool
ilo_3d_pass_render_condition(struct ilo_context *ilo)
{
   struct ilo_3d *hw3d = ilo->hw3d;
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result))
      return (!result == hw3d->render_condition.cond);
   else
      return true;
}
#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))
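/*
 * For example, with a restart index of 0xffff and an index buffer of
 *
 *    { 0, 1, 2, 3, 0xffff, 4, 5, 6 }
 *
 * the scan below produces two sub-draws, one covering the first four indices
 * and one covering the last three, each with primitive_restart cleared so
 * the hardware never sees the cut index.
 */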
/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   unsigned scan_index;
   unsigned scan_num = 0;

   cur_start = orig_info->start;
   cur_count = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

   return scan_num;
}
static bool
ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
{
   /*
    * Haswell (GEN(7.5)) supports an arbitrary cut index; check everything
    * older.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->ib.index_size) {
   case 1:
      return ((restart_index & 0xff) == 0xff);
   case 2:
      return ((restart_index & 0xffff) == 0xffff);
   case 4:
      return (restart_index == 0xffffffff);
   }

   return false;
}
static bool
ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* all 965 GEN graphics support a cut index for these primitive types */
      return true;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types. */
         return true;
      }
      break;
   }

   return false;
}
/**
 * Handle an indexed draw that uses primitive restart when the restart index
 * or primitive type cannot be handled by the HW.  The draw is broken into
 * sub-primitives in software and each sub-primitive is drawn separately.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  Worst case,
    * every other index could be a restart index, so we need space for that
    * many sub-primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
            ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   /* draw the sub-primitives one by one */
   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      info++;
      sub_prim_count--;
   }

   FREE(restart_info);
}
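/*
 * Upload the shader cache into the kernel bo, growing the bo when the cached
 * kernels no longer fit.  The bo starts at 8 KB and doubles until there is
 * room; e.g. if 36 KB are needed against a full 16 KB bo, it is reallocated
 * at 64 KB and the whole cache is re-uploaded into it.
 */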
static bool
upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
{
   bool incremental = true;
   int upload;

   upload = ilo_shader_cache_upload(shc,
         NULL, hw3d->kernel.used, incremental);
   if (!upload)
      return true;

   /*
    * Allocate a new bo.  When this is a new batch, assume the bo is still in
    * use by the previous batch and force allocation.
    *
    * Does it help to make shader cache upload use an unsynchronized mapping,
    * and remove the check for new batch here?
    */
   if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
      unsigned new_size = (hw3d->kernel.size) ?
         hw3d->kernel.size : (8 * 1024);

      while (hw3d->kernel.used + upload > new_size)
         new_size *= 2;

      if (hw3d->kernel.bo)
         intel_bo_unreference(hw3d->kernel.bo);

      hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
            "kernel bo", new_size, 0);
      if (!hw3d->kernel.bo) {
         ilo_err("failed to allocate kernel bo\n");
         return false;
      }

      hw3d->kernel.used = 0;
      hw3d->kernel.size = new_size;
      incremental = false;

      assert(new_size >= ilo_shader_cache_upload(shc,
            NULL, hw3d->kernel.used, incremental));

      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   upload = ilo_shader_cache_upload(shc,
         hw3d->kernel.bo, hw3d->kernel.used, incremental);
   if (upload < 0) {
      ilo_err("failed to upload shaders\n");
      return false;
   }

   hw3d->kernel.used += upload;

   assert(hw3d->kernel.used <= hw3d->kernel.size);

   return true;
}
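/*
 * The draw path below proceeds roughly as follows: check the render
 * condition, fall back to software primitive restart when the hardware
 * cannot handle the restart index or primitive type, finalize the 3D state,
 * upload any new shaders, resolve the framebuffer, and finally emit the draw
 * through the 3D pipeline.
 */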
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_dump_dirty_flags(ilo->dirty);
   }

   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to SW if not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   if (!upload_shaders(hw3d, ilo->shader_cache))
      return;

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}
static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp, "texture barrier");
}
static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}
/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}