/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
28 #include "util/u_prim.h"
29 #include "intel_winsys.h"
31 #include "ilo_3d_pipeline.h"
33 #include "ilo_context.h"
35 #include "ilo_query.h"
36 #include "ilo_shader.h"
37 #include "ilo_state.h"
41 process_query_for_occlusion_counter(struct ilo_3d
*hw3d
,
44 uint64_t *vals
, depth_count
= 0;
48 assert(q
->reg_read
% 2 == 0);
50 vals
= intel_bo_map(q
->bo
, false);
51 for (i
= 1; i
< q
->reg_read
; i
+= 2)
52 depth_count
+= vals
[i
] - vals
[i
- 1];
53 intel_bo_unmap(q
->bo
);
55 /* accumulate so that the query can be resumed if wanted */
56 q
->data
.u64
+= depth_count
;
/**
 * Convert a raw 32-bit GPU timestamp to nanoseconds (80ns per tick).
 */
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}
68 process_query_for_timestamp(struct ilo_3d
*hw3d
, struct ilo_query
*q
)
70 uint64_t *vals
, timestamp
;
72 assert(q
->reg_read
== 1);
74 vals
= intel_bo_map(q
->bo
, false);
76 intel_bo_unmap(q
->bo
);
78 q
->data
.u64
= timestamp_to_ns(timestamp
);
83 process_query_for_time_elapsed(struct ilo_3d
*hw3d
, struct ilo_query
*q
)
85 uint64_t *vals
, elapsed
= 0;
89 assert(q
->reg_read
% 2 == 0);
91 vals
= intel_bo_map(q
->bo
, false);
93 for (i
= 1; i
< q
->reg_read
; i
+= 2)
94 elapsed
+= vals
[i
] - vals
[i
- 1];
96 intel_bo_unmap(q
->bo
);
98 /* accumulate so that the query can be resumed if wanted */
99 q
->data
.u64
+= timestamp_to_ns(elapsed
);
104 process_query_for_pipeline_statistics(struct ilo_3d
*hw3d
,
107 const uint64_t *vals
;
110 assert(q
->reg_read
% 22 == 0);
112 vals
= intel_bo_map(q
->bo
, false);
114 for (i
= 0; i
< q
->reg_read
; i
+= 22) {
115 struct pipe_query_data_pipeline_statistics
*stats
=
116 &q
->data
.pipeline_statistics
;
117 const uint64_t *begin
= vals
+ i
;
118 const uint64_t *end
= begin
+ 11;
120 stats
->ia_vertices
+= end
[0] - begin
[0];
121 stats
->ia_primitives
+= end
[1] - begin
[1];
122 stats
->vs_invocations
+= end
[2] - begin
[2];
123 stats
->gs_invocations
+= end
[3] - begin
[3];
124 stats
->gs_primitives
+= end
[4] - begin
[4];
125 stats
->c_invocations
+= end
[5] - begin
[5];
126 stats
->c_primitives
+= end
[6] - begin
[6];
127 stats
->ps_invocations
+= end
[7] - begin
[7];
128 stats
->hs_invocations
+= end
[8] - begin
[8];
129 stats
->ds_invocations
+= end
[9] - begin
[9];
130 stats
->cs_invocations
+= end
[10] - begin
[10];
133 intel_bo_unmap(q
->bo
);
139 ilo_3d_resume_queries(struct ilo_3d
*hw3d
)
143 /* resume occlusion queries */
144 LIST_FOR_EACH_ENTRY(q
, &hw3d
->occlusion_queries
, list
) {
145 /* accumulate the result if the bo is alreay full */
146 if (q
->reg_read
>= q
->reg_total
)
147 process_query_for_occlusion_counter(hw3d
, q
);
149 ilo_3d_pipeline_emit_write_depth_count(hw3d
->pipeline
,
150 q
->bo
, q
->reg_read
++);
153 /* resume timer queries */
154 LIST_FOR_EACH_ENTRY(q
, &hw3d
->time_elapsed_queries
, list
) {
155 /* accumulate the result if the bo is alreay full */
156 if (q
->reg_read
>= q
->reg_total
)
157 process_query_for_time_elapsed(hw3d
, q
);
159 ilo_3d_pipeline_emit_write_timestamp(hw3d
->pipeline
,
160 q
->bo
, q
->reg_read
++);
163 /* resume pipeline statistics queries */
164 LIST_FOR_EACH_ENTRY(q
, &hw3d
->pipeline_statistics_queries
, list
) {
165 /* accumulate the result if the bo is alreay full */
166 if (q
->reg_read
>= q
->reg_total
)
167 process_query_for_pipeline_statistics(hw3d
, q
);
169 ilo_3d_pipeline_emit_write_statistics(hw3d
->pipeline
,
176 ilo_3d_pause_queries(struct ilo_3d
*hw3d
)
180 /* pause occlusion queries */
181 LIST_FOR_EACH_ENTRY(q
, &hw3d
->occlusion_queries
, list
) {
182 assert(q
->reg_read
< q
->reg_total
);
183 ilo_3d_pipeline_emit_write_depth_count(hw3d
->pipeline
,
184 q
->bo
, q
->reg_read
++);
187 /* pause timer queries */
188 LIST_FOR_EACH_ENTRY(q
, &hw3d
->time_elapsed_queries
, list
) {
189 assert(q
->reg_read
< q
->reg_total
);
190 ilo_3d_pipeline_emit_write_timestamp(hw3d
->pipeline
,
191 q
->bo
, q
->reg_read
++);
194 /* pause pipeline statistics queries */
195 LIST_FOR_EACH_ENTRY(q
, &hw3d
->pipeline_statistics_queries
, list
) {
196 assert(q
->reg_read
< q
->reg_total
);
197 ilo_3d_pipeline_emit_write_statistics(hw3d
->pipeline
,
204 ilo_3d_own_render_ring(struct ilo_3d
*hw3d
)
206 ilo_cp_set_owner(hw3d
->cp
, INTEL_RING_RENDER
, &hw3d
->owner
);
210 ilo_3d_reserve_for_query(struct ilo_3d
*hw3d
, struct ilo_query
*q
,
211 enum ilo_3d_pipeline_action act
)
213 q
->reg_cmd_size
= ilo_3d_pipeline_estimate_size(hw3d
->pipeline
, act
, NULL
);
215 /* XXX we should check the aperture size */
216 if (ilo_cp_space(hw3d
->cp
) < q
->reg_cmd_size
* 2) {
217 ilo_cp_submit(hw3d
->cp
, "out of space");
218 assert(ilo_cp_space(hw3d
->cp
) >= q
->reg_cmd_size
* 2);
221 /* reserve space for pausing the query */
222 hw3d
->owner
.reserve
+= q
->reg_cmd_size
;
229 ilo_3d_begin_query(struct ilo_context
*ilo
, struct ilo_query
*q
)
231 struct ilo_3d
*hw3d
= ilo
->hw3d
;
233 ilo_3d_own_render_ring(hw3d
);
236 case PIPE_QUERY_OCCLUSION_COUNTER
:
237 ilo_3d_reserve_for_query(hw3d
, q
, ILO_3D_PIPELINE_WRITE_DEPTH_COUNT
);
240 if (ilo_query_alloc_bo(q
, 2, -1, hw3d
->cp
->winsys
)) {
241 ilo_3d_pipeline_emit_write_depth_count(hw3d
->pipeline
,
242 q
->bo
, q
->reg_read
++);
244 list_add(&q
->list
, &hw3d
->occlusion_queries
);
247 case PIPE_QUERY_TIMESTAMP
:
250 case PIPE_QUERY_TIME_ELAPSED
:
251 ilo_3d_reserve_for_query(hw3d
, q
, ILO_3D_PIPELINE_WRITE_TIMESTAMP
);
254 if (ilo_query_alloc_bo(q
, 2, -1, hw3d
->cp
->winsys
)) {
255 ilo_3d_pipeline_emit_write_timestamp(hw3d
->pipeline
,
256 q
->bo
, q
->reg_read
++);
258 list_add(&q
->list
, &hw3d
->time_elapsed_queries
);
261 case PIPE_QUERY_PRIMITIVES_GENERATED
:
263 list_add(&q
->list
, &hw3d
->prim_generated_queries
);
265 case PIPE_QUERY_PRIMITIVES_EMITTED
:
267 list_add(&q
->list
, &hw3d
->prim_emitted_queries
);
269 case PIPE_QUERY_PIPELINE_STATISTICS
:
270 ilo_3d_reserve_for_query(hw3d
, q
, ILO_3D_PIPELINE_WRITE_STATISTICS
);
271 memset(&q
->data
.pipeline_statistics
, 0,
272 sizeof(q
->data
.pipeline_statistics
));
274 if (ilo_query_alloc_bo(q
, 11 * 2, -1, hw3d
->cp
->winsys
)) {
275 ilo_3d_pipeline_emit_write_statistics(hw3d
->pipeline
,
279 list_add(&q
->list
, &hw3d
->pipeline_statistics_queries
);
283 assert(!"unknown query type");
292 ilo_3d_end_query(struct ilo_context
*ilo
, struct ilo_query
*q
)
294 struct ilo_3d
*hw3d
= ilo
->hw3d
;
296 ilo_3d_own_render_ring(hw3d
);
299 case PIPE_QUERY_OCCLUSION_COUNTER
:
302 assert(q
->reg_read
< q
->reg_total
);
303 assert(hw3d
->owner
.reserve
>= q
->reg_cmd_size
);
304 hw3d
->owner
.reserve
-= q
->reg_cmd_size
;
306 ilo_3d_pipeline_emit_write_depth_count(hw3d
->pipeline
,
307 q
->bo
, q
->reg_read
++);
309 case PIPE_QUERY_TIMESTAMP
:
312 if (ilo_query_alloc_bo(q
, 1, 1, hw3d
->cp
->winsys
)) {
313 ilo_3d_pipeline_emit_write_timestamp(hw3d
->pipeline
,
314 q
->bo
, q
->reg_read
++);
317 case PIPE_QUERY_TIME_ELAPSED
:
320 assert(q
->reg_read
< q
->reg_total
);
321 assert(hw3d
->owner
.reserve
>= q
->reg_cmd_size
);
322 hw3d
->owner
.reserve
-= q
->reg_cmd_size
;
324 ilo_3d_pipeline_emit_write_timestamp(hw3d
->pipeline
,
325 q
->bo
, q
->reg_read
++);
327 case PIPE_QUERY_PRIMITIVES_GENERATED
:
328 case PIPE_QUERY_PRIMITIVES_EMITTED
:
331 case PIPE_QUERY_PIPELINE_STATISTICS
:
334 assert(q
->reg_read
+ 11 <= q
->reg_total
);
335 assert(hw3d
->owner
.reserve
>= q
->reg_cmd_size
);
336 hw3d
->owner
.reserve
-= q
->reg_cmd_size
;
338 ilo_3d_pipeline_emit_write_statistics(hw3d
->pipeline
,
343 assert(!"unknown query type");
349 * Process the raw query data.
352 ilo_3d_process_query(struct ilo_context
*ilo
, struct ilo_query
*q
)
354 struct ilo_3d
*hw3d
= ilo
->hw3d
;
357 case PIPE_QUERY_OCCLUSION_COUNTER
:
359 process_query_for_occlusion_counter(hw3d
, q
);
361 case PIPE_QUERY_TIMESTAMP
:
363 process_query_for_timestamp(hw3d
, q
);
365 case PIPE_QUERY_TIME_ELAPSED
:
367 process_query_for_time_elapsed(hw3d
, q
);
369 case PIPE_QUERY_PRIMITIVES_GENERATED
:
370 case PIPE_QUERY_PRIMITIVES_EMITTED
:
372 case PIPE_QUERY_PIPELINE_STATISTICS
:
374 process_query_for_pipeline_statistics(hw3d
, q
);
377 assert(!"unknown query type");
383 * Hook for CP new-batch.
386 ilo_3d_cp_submitted(struct ilo_3d
*hw3d
)
388 /* invalidate the pipeline */
389 ilo_3d_pipeline_invalidate(hw3d
->pipeline
,
390 ILO_3D_PIPELINE_INVALIDATE_BATCH_BO
|
391 ILO_3D_PIPELINE_INVALIDATE_STATE_BO
|
392 ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO
);
394 hw3d
->new_batch
= true;
/**
 * CP ownership-gained callback: resume the paused queries.
 */
static void
ilo_3d_own_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_resume_queries(hw3d);
}
/**
 * CP ownership-lost callback: pause the active queries.
 */
static void
ilo_3d_release_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}
414 * Create a 3D context.
417 ilo_3d_create(struct ilo_cp
*cp
, const struct ilo_dev_info
*dev
)
421 hw3d
= CALLOC_STRUCT(ilo_3d
);
426 hw3d
->owner
.own
= ilo_3d_own_cp
;
427 hw3d
->owner
.release
= ilo_3d_release_cp
;
428 hw3d
->owner
.data
= hw3d
;
429 hw3d
->owner
.reserve
= 0;
431 hw3d
->new_batch
= true;
433 list_inithead(&hw3d
->occlusion_queries
);
434 list_inithead(&hw3d
->time_elapsed_queries
);
435 list_inithead(&hw3d
->prim_generated_queries
);
436 list_inithead(&hw3d
->prim_emitted_queries
);
437 list_inithead(&hw3d
->pipeline_statistics_queries
);
439 hw3d
->pipeline
= ilo_3d_pipeline_create(cp
, dev
);
440 if (!hw3d
->pipeline
) {
449 * Destroy a 3D context.
452 ilo_3d_destroy(struct ilo_3d
*hw3d
)
454 ilo_3d_pipeline_destroy(hw3d
->pipeline
);
459 draw_vbo(struct ilo_3d
*hw3d
, const struct ilo_context
*ilo
,
460 int *prim_generated
, int *prim_emitted
)
462 bool need_flush
= false;
465 ilo_3d_own_render_ring(hw3d
);
467 if (!hw3d
->new_batch
) {
469 * Without a better tracking mechanism, when the framebuffer changes, we
470 * have to assume that the old framebuffer may be sampled from. If that
471 * happens in the middle of a batch buffer, we need to insert manual
474 need_flush
= (ilo
->dirty
& ILO_DIRTY_FB
);
476 /* same to SO target changes */
477 need_flush
|= (ilo
->dirty
& ILO_DIRTY_SO
);
480 /* make sure there is enough room first */
481 max_len
= ilo_3d_pipeline_estimate_size(hw3d
->pipeline
,
482 ILO_3D_PIPELINE_DRAW
, ilo
);
484 max_len
+= ilo_3d_pipeline_estimate_size(hw3d
->pipeline
,
485 ILO_3D_PIPELINE_FLUSH
, NULL
);
488 if (max_len
> ilo_cp_space(hw3d
->cp
)) {
489 ilo_cp_submit(hw3d
->cp
, "out of space");
491 assert(max_len
<= ilo_cp_space(hw3d
->cp
));
495 ilo_3d_pipeline_emit_flush(hw3d
->pipeline
);
497 return ilo_3d_pipeline_emit_draw(hw3d
->pipeline
, ilo
,
498 prim_generated
, prim_emitted
);
502 update_prim_count(struct ilo_3d
*hw3d
, int generated
, int emitted
)
506 LIST_FOR_EACH_ENTRY(q
, &hw3d
->prim_generated_queries
, list
)
507 q
->data
.u64
+= generated
;
509 LIST_FOR_EACH_ENTRY(q
, &hw3d
->prim_emitted_queries
, list
)
510 q
->data
.u64
+= emitted
;
514 ilo_3d_pass_render_condition(struct ilo_context
*ilo
)
516 struct ilo_3d
*hw3d
= ilo
->hw3d
;
520 if (!hw3d
->render_condition
.query
)
523 switch (hw3d
->render_condition
.mode
) {
524 case PIPE_RENDER_COND_WAIT
:
525 case PIPE_RENDER_COND_BY_REGION_WAIT
:
528 case PIPE_RENDER_COND_NO_WAIT
:
529 case PIPE_RENDER_COND_BY_REGION_NO_WAIT
:
535 if (ilo
->base
.get_query_result(&ilo
->base
, hw3d
->render_condition
.query
,
536 wait
, (union pipe_query_result
*) &result
))
537 return (!result
== hw3d
->render_condition
.cond
);
542 #define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
543 #define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))
546 * \see find_sub_primitives() from core mesa
549 ilo_find_sub_primitives(const void *elements
, unsigned element_size
,
550 const struct pipe_draw_info
*orig_info
,
551 struct pipe_draw_info
*info
)
553 const unsigned max_prims
= orig_info
->count
- orig_info
->start
;
554 unsigned i
, cur_start
, cur_count
;
558 cur_start
= orig_info
->start
;
562 #define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])
564 #define SCAN_ELEMENTS(TYPE) \
565 info[scan_num] = *orig_info; \
566 info[scan_num].primitive_restart = false; \
567 for (i = orig_info->start; i < orig_info->count; i++) { \
568 scan_index = IB_INDEX_READ(TYPE, i); \
569 if (scan_index == orig_info->restart_index) { \
570 if (cur_count > 0) { \
571 assert(scan_num < max_prims); \
572 info[scan_num].start = cur_start; \
573 info[scan_num].count = cur_count; \
575 info[scan_num] = *orig_info; \
576 info[scan_num].primitive_restart = false; \
582 UPDATE_MIN2(info[scan_num].min_index, scan_index); \
583 UPDATE_MAX2(info[scan_num].max_index, scan_index); \
587 if (cur_count > 0) { \
588 assert(scan_num < max_prims); \
589 info[scan_num].start = cur_start; \
590 info[scan_num].count = cur_count; \
594 switch (element_size
) {
596 SCAN_ELEMENTS(uint8_t);
599 SCAN_ELEMENTS(uint16_t);
602 SCAN_ELEMENTS(uint32_t);
605 assert(0 && "bad index_size in find_sub_primitives()");
614 ilo_check_restart_index(const struct ilo_context
*ilo
, unsigned restart_index
)
617 * Haswell (GEN(7.5)) supports an arbitrary cut index, check everything
620 if (ilo_dev_gen(ilo
->dev
) >= ILO_GEN(7.5))
623 /* Note: indices must be unsigned byte, unsigned short or unsigned int */
624 switch (ilo
->ib
.index_size
) {
626 return ((restart_index
& 0xff) == 0xff);
629 return ((restart_index
& 0xffff) == 0xffff);
632 return (restart_index
== 0xffffffff);
639 ilo_check_restart_prim_type(const struct ilo_context
*ilo
, unsigned prim
)
642 case PIPE_PRIM_POINTS
:
643 case PIPE_PRIM_LINES
:
644 case PIPE_PRIM_LINE_STRIP
:
645 case PIPE_PRIM_TRIANGLES
:
646 case PIPE_PRIM_TRIANGLE_STRIP
:
647 /* All 965 GEN graphics support a cut index for these primitive types */
651 case PIPE_PRIM_LINE_LOOP
:
652 case PIPE_PRIM_POLYGON
:
653 case PIPE_PRIM_QUAD_STRIP
:
654 case PIPE_PRIM_QUADS
:
655 case PIPE_PRIM_TRIANGLE_FAN
:
656 if (ilo_dev_gen(ilo
->dev
) >= ILO_GEN(7.5)) {
657 /* Haswell and newer parts can handle these prim types. */
667 * Handle VBOs using primitive restart.
668 * Verify that restart index and primitive type can be handled by the HW.
669 * Return true if this routine did the rendering
670 * Return false if this routine did NOT render because restart can be handled
674 ilo_draw_vbo_with_sw_restart(struct pipe_context
*pipe
,
675 const struct pipe_draw_info
*info
)
677 struct ilo_context
*ilo
= ilo_context(pipe
);
678 struct pipe_draw_info
*restart_info
= NULL
;
679 int sub_prim_count
= 1;
682 * We have to break up the primitive into chunks manually
683 * Worst case, every other index could be a restart index so
684 * need to have space for that many primitives
686 restart_info
= MALLOC(((info
->count
+ 1) / 2) * sizeof(*info
));
687 if (NULL
== restart_info
) {
688 /* If we can't get memory for this, bail out */
689 ilo_err("%s:%d - Out of memory", __FILE__
, __LINE__
);
693 if (ilo
->ib
.buffer
) {
694 struct pipe_transfer
*transfer
;
697 map
= pipe_buffer_map(pipe
, ilo
->ib
.buffer
,
698 PIPE_TRANSFER_READ
, &transfer
);
700 sub_prim_count
= ilo_find_sub_primitives(map
+ ilo
->ib
.offset
,
701 ilo
->ib
.index_size
, info
, restart_info
);
703 pipe_buffer_unmap(pipe
, transfer
);
706 sub_prim_count
= ilo_find_sub_primitives(ilo
->ib
.user_buffer
,
707 ilo
->ib
.index_size
, info
, restart_info
);
712 while (sub_prim_count
> 0) {
713 pipe
->draw_vbo(pipe
, info
);
723 ilo_draw_vbo(struct pipe_context
*pipe
, const struct pipe_draw_info
*info
)
725 struct ilo_context
*ilo
= ilo_context(pipe
);
726 struct ilo_3d
*hw3d
= ilo
->hw3d
;
727 int prim_generated
, prim_emitted
;
729 if (ilo_debug
& ILO_DEBUG_DRAW
) {
731 ilo_printf("indexed draw %s: "
732 "index start %d, count %d, vertex range [%d, %d]\n",
733 u_prim_name(info
->mode
), info
->start
, info
->count
,
734 info
->min_index
, info
->max_index
);
737 ilo_printf("draw %s: vertex start %d, count %d\n",
738 u_prim_name(info
->mode
), info
->start
, info
->count
);
741 ilo_dump_dirty_flags(ilo
->dirty
);
744 if (!ilo_3d_pass_render_condition(ilo
))
747 if (info
->primitive_restart
&& info
->indexed
) {
749 * Want to draw an indexed primitive using primitive restart
750 * Check that HW can handle the request and fall to SW if not.
752 if (!ilo_check_restart_index(ilo
, info
->restart_index
) ||
753 !ilo_check_restart_prim_type(ilo
, info
->mode
)) {
754 ilo_draw_vbo_with_sw_restart(pipe
, info
);
759 ilo_finalize_3d_states(ilo
, info
);
761 ilo_shader_cache_upload(ilo
->shader_cache
, &hw3d
->cp
->builder
);
763 ilo_blit_resolve_framebuffer(ilo
);
765 /* If draw_vbo ever fails, return immediately. */
766 if (!draw_vbo(hw3d
, ilo
, &prim_generated
, &prim_emitted
))
769 /* clear dirty status */
771 hw3d
->new_batch
= false;
773 /* avoid dangling pointer reference */
776 update_prim_count(hw3d
, prim_generated
, prim_emitted
);
778 if (ilo_debug
& ILO_DEBUG_NOCACHE
)
779 ilo_3d_pipeline_emit_flush(hw3d
->pipeline
);
783 ilo_render_condition(struct pipe_context
*pipe
,
784 struct pipe_query
*query
,
788 struct ilo_context
*ilo
= ilo_context(pipe
);
789 struct ilo_3d
*hw3d
= ilo
->hw3d
;
791 /* reference count? */
792 hw3d
->render_condition
.query
= query
;
793 hw3d
->render_condition
.mode
= mode
;
794 hw3d
->render_condition
.cond
= condition
;
798 ilo_texture_barrier(struct pipe_context
*pipe
)
800 struct ilo_context
*ilo
= ilo_context(pipe
);
801 struct ilo_3d
*hw3d
= ilo
->hw3d
;
803 if (ilo
->cp
->ring
!= INTEL_RING_RENDER
)
806 ilo_3d_pipeline_emit_flush(hw3d
->pipeline
);
809 if (ilo_dev_gen(ilo
->dev
) >= ILO_GEN(7))
810 ilo_cp_submit(hw3d
->cp
, "texture barrier");
814 ilo_get_sample_position(struct pipe_context
*pipe
,
815 unsigned sample_count
,
816 unsigned sample_index
,
819 struct ilo_context
*ilo
= ilo_context(pipe
);
820 struct ilo_3d
*hw3d
= ilo
->hw3d
;
822 ilo_3d_pipeline_get_sample_position(hw3d
->pipeline
,
823 sample_count
, sample_index
,
824 &out_value
[0], &out_value
[1]);
828 * Initialize 3D-related functions.
831 ilo_init_3d_functions(struct ilo_context
*ilo
)
833 ilo
->base
.draw_vbo
= ilo_draw_vbo
;
834 ilo
->base
.render_condition
= ilo_render_condition
;
835 ilo
->base
.texture_barrier
= ilo_texture_barrier
;
836 ilo
->base
.get_sample_position
= ilo_get_sample_position
;