ilo: Initialize need_flush in draw_vbo.
[mesa.git] / src / gallium / drivers / ilo / ilo_3d.c
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

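/*
 * Accumulate the results of an occlusion query.  The query bo stores depth
 * counts in begin/end pairs: even slots are written when the query starts
 * or resumes, odd slots when it pauses or ends.  The occlusion count is the
 * sum of the differences of each pair.
 */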
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* depth counts are written in begin/end pairs */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   q->bo->unmap(q->bo);

   /* accumulate so that the query can be paused and resumed */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

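/*
 * Convert a raw TIMESTAMP register value to nanoseconds.  The multiplier
 * assumes the counter ticks once every 80ns, matching ilo_get_timestamp();
 * only the low 32 bits of the value are used.
 */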
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   timestamp = vals[0];
   q->bo->unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* timestamps are written in begin/end pairs */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   q->bo->unmap(q->bo);

   /* accumulate so that the query can be paused and resumed */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

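/*
 * Emit register writes to resume all outstanding queries.  Queries are
 * paused when the render ring is released and must be resumed, with a fresh
 * begin value, when the ring is owned again.  If a query's bo has no free
 * pair slots left, accumulate the collected values first to free them up.
 */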
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

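/*
 * ilo_cp_set_owner() invokes the previous owner's release callback when
 * ownership changes hands.  We use that to pause the outstanding queries,
 * and resume them whenever we (re)acquire ownership of the render ring.
 */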
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}

static void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);
   FREE(hw3d);
}

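/*
 * Emit the commands for a draw.  need_flush starts out false: a manual
 * flush is needed only in the middle of a batch, when the framebuffer or
 * the SO targets change while their previous contents may still be sampled
 * from.  If the batch has to be flushed to make room anyway, the manual
 * flush becomes unnecessary and need_flush is cleared again.
 */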
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         const struct pipe_draw_info *info,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes,
       * we have to assume that the old framebuffer may be sampled from.
       * If that happens in the middle of a batch buffer, we need to insert
       * manual flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FRAMEBUFFER);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_STREAM_OUTPUT_TARGETS);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp);
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo, info,
         prim_generated, prim_emitted);
}

static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

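/*
 * Check the conditional-rendering predicate.  Only the WAIT modes block on
 * the query result; if the result is not yet available in a NO_WAIT mode,
 * rendering proceeds as if the condition passed.
 */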
static bool
pass_render_condition(struct ilo_3d *hw3d, struct pipe_context *pipe)
{
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (pipe->get_query_result(pipe, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result)) {
      return (result > 0);
   }
   else {
      return true;
   }
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

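/*
 * For example, with restart index R, the index stream { 0, 1, 2, R, 3, 4 }
 * is split into two sub-draws, one covering { 0, 1, 2 } and one covering
 * { 3, 4 }.  min_index/max_index of each sub-draw are updated as the
 * indices are scanned, and the return value is the number of sub-draws
 * written to info[].
 */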
/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

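/*
 * Check whether the hardware can use info->restart_index directly.  Before
 * Haswell the cut index is fixed at the maximum value for the index size,
 * so any other restart index must be handled in software.
 */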
static inline bool
ilo_check_restart_index(struct ilo_context *ilo,
                        const struct pipe_draw_info *info)
{
   /*
    * Haswell (GEN(7.5)) supports an arbitrary cut index; everything older
    * must be checked.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->index_buffer.index_size) {
   case 1:
      return ((info->restart_index & 0xff) == 0xff);
   case 2:
      return ((info->restart_index & 0xffff) == 0xffff);
   case 4:
      return (info->restart_index == 0xffffffff);
   }

   return false;
}

static inline bool
ilo_check_restart_prim_type(struct ilo_context *ilo,
                            const struct pipe_draw_info *info)
{
   switch (info->mode) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* all 965 GEN graphics support a cut index for these primitive types */
      return true;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Handle an indexed draw that uses primitive restart when the restart index
 * or the primitive type cannot be handled by the hardware: split the draw
 * into sub-primitives in software and draw each one separately.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   struct pipe_transfer *transfer = NULL;
   const void *map;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  In the worst
    * case, every other index could be a restart index, so we need space for
    * that many primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* if we cannot get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   map = pipe_buffer_map(pipe,
                         ilo->index_buffer.buffer,
                         PIPE_TRANSFER_READ,
                         &transfer);

   sub_prim_count =
      ilo_find_sub_primitives((const uint8_t *) map + ilo->index_buffer.offset,
                              ilo->index_buffer.index_size,
                              info,
                              restart_info);

   pipe_buffer_unmap(pipe, transfer);

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

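/*
 * The main draw entry point.  The render condition, the software
 * primitive-restart fallback, and query bookkeeping are all handled here
 * before the draw is handed to draw_vbo() for command emission.
 */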
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (!pass_render_condition(hw3d, pipe))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to SW
       * if not.
       */
      if (!ilo_check_restart_index(ilo, info) ||
          !ilo_check_restart_prim_type(ilo, info)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   /* assume the cache is still in use by the previous batch */
   if (hw3d->new_batch)
      ilo_shader_cache_mark_busy(ilo->shader_cache);

   ilo_finalize_states(ilo);

   /* the shaders may be uploaded to a new shader cache */
   if (hw3d->shader_cache_seqno != ilo->shader_cache->seqno) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   /*
    * The VBs and/or IB may have different BOs due to being mapped with
    * PIPE_TRANSFER_DISCARD_x.  We should track that instead of setting
    * the dirty flags, for performance reasons.
    */
   ilo->dirty |= ILO_DIRTY_VERTEX_BUFFERS | ILO_DIRTY_INDEX_BUFFER;

   /* if draw_vbo fails, leave the dirty flags set and return immediately */
   if (!draw_vbo(hw3d, ilo, info, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;
   hw3d->shader_cache_seqno = ilo->shader_cache->seqno;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
}

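/*
 * Flush rendering so that it becomes visible to subsequent texture fetches
 * from the same resources.
 */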
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp);
}

static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}