ilo: replace cp hooks by cp owner and flush callback
[mesa.git] / src / gallium / drivers / ilo / ilo_3d.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2012-2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "intel_winsys.h"
29
30 #include "ilo_3d_pipeline.h"
31 #include "ilo_context.h"
32 #include "ilo_cp.h"
33 #include "ilo_query.h"
34 #include "ilo_shader.h"
35 #include "ilo_state.h"
36 #include "ilo_3d.h"
37
/* Fold the PS_DEPTH_COUNT snapshots recorded in the query bo into the
 * query result and reset the bo for reuse.
 */
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* depth counts are written in pairs (one at resume, one at pause) */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   /* sum the delta of each (vals[i - 1], vals[i]) pair */
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   q->bo->unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}
58
59 static uint64_t
60 timestamp_to_ns(uint64_t timestamp)
61 {
62 /* see ilo_get_timestamp() */
63 return (timestamp & 0xffffffff) * 80;
64 }
65
/* Read back the single timestamp written for a PIPE_QUERY_TIMESTAMP query
 * and store it, converted to nanoseconds, in the query result.
 */
static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   /* exactly one timestamp write is expected */
   assert(q->reg_read == 1);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);
   timestamp = vals[0];
   q->bo->unmap(q->bo);

   /* unlike the pair-based queries, a timestamp is not accumulated */
   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}
81
/* Fold the timestamp pairs recorded in the query bo into the result of a
 * PIPE_QUERY_TIME_ELAPSED query and reset the bo for reuse.
 */
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* timestamps are written in pairs (one at resume, one at pause) */
   assert(q->reg_read % 2 == 0);

   q->bo->map(q->bo, false);
   vals = q->bo->get_virtual(q->bo);

   /* sum the raw tick delta of each pair */
   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   q->bo->unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   /* NOTE(review): timestamp_to_ns() masks its argument to 32 bits, so a
    * summed delta of 2^32 ticks or more would be truncated here — confirm
    * this cannot occur in practice
    */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}
103
/* Emit commands to resume all outstanding hardware queries, writing a new
 * starting value for each.  Called when we (re)gain the render ring.
 */
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}
129
/* Emit commands to pause all outstanding hardware queries, writing an
 * ending value for each.  Called right before we lose the render ring.
 */
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* the resume path guarantees there is a free slot for the pause write */
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}
149
/* CP owner release callback: invoked when someone else is about to take
 * over the ring, so pause our outstanding hardware queries first.
 */
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}
157
/* Make sure the CP targets the render ring and that we own it, resuming
 * queries that were paused when ownership was released.
 */
static void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   /* presumably ilo_cp_set_owner() returns true only when ownership
    * actually changed hands — confirm against ilo_cp.c
    */
   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}
166
/**
 * Begin a query.
 *
 * Hardware-backed queries (occlusion, time-elapsed) allocate a bo, emit the
 * starting register write, and enlarge the CP owner reserve so that the
 * pause writes always fit in the batch.  Software-counted queries are only
 * added to their tracking list.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   /* queries are serviced by the render ring */
   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      /* NOTE(review): presumably (2, -1) means slots come in pairs with no
       * fixed cap — confirm against ilo_query_alloc_bo()
       */
      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop: the timestamp write happens in ilo_3d_end_query() */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* counted in software at draw time; see update_prim_count() */
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
228
/**
 * End a query.
 *
 * Hardware-backed queries emit the final register write and give back the
 * batch space that was reserved for pausing them.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* no longer needs pausing/resuming */
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      /* return the space reserved for the pause write */
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      /* a single slot for the one timestamp write */
      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* software counters: just stop tracking the query */
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
275
/**
 * Process the raw query data.
 *
 * Converts the register snapshots in the query bo (if any) into the final
 * value in q->data.  Software-counted queries are already up to date.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* a bo exists only if the query was successfully begun */
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* counted in software at draw time; q->data is already final */
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
305
/**
 * Hook for CP new-batch: called after the command parser has flushed and a
 * fresh batch buffer is in use.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      /* no hardware context: all hardware state is lost across batches */
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}
326
/**
 * Create a 3D context.
 *
 * Returns NULL on allocation failure.  The returned context does not own
 * \p cp; the caller keeps it alive for the lifetime of the 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   /* get called back to pause queries when ring ownership is taken away */
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}
358
359 /**
360 * Destroy a 3D context.
361 */
362 void
363 ilo_3d_destroy(struct ilo_3d *hw3d)
364 {
365 ilo_3d_pipeline_destroy(hw3d->pipeline);
366 FREE(hw3d);
367 }
368
/* Emit the state and commands for one draw, flushing the batch first if it
 * cannot hold everything.  Returns what ilo_3d_pipeline_emit_draw() returns
 * (presumably false on failure — confirm in ilo_3d_pipeline.c).
 */
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         const struct pipe_draw_info *info,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   /*
    * Without a better tracking mechanism, when the framebuffer changes, we
    * have to assume that the old framebuffer may be sampled from.  If that
    * happens in the middle of a batch buffer, we need to insert manual
    * flushes.
    */
   need_flush = (!hw3d->new_batch && (ilo->dirty & ILO_DIRTY_FRAMEBUFFER));

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp);
      /* flushing the batch makes the manual flush unnecessary */
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo, info,
         prim_generated, prim_emitted);
}
407
408 static void
409 update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
410 {
411 struct ilo_query *q;
412
413 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
414 q->data.u64 += generated;
415
416 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
417 q->data.u64 += emitted;
418 }
419
420 static bool
421 pass_render_condition(struct ilo_3d *hw3d, struct pipe_context *pipe)
422 {
423 uint64_t result;
424 bool wait;
425
426 if (!hw3d->render_condition.query)
427 return true;
428
429 switch (hw3d->render_condition.mode) {
430 case PIPE_RENDER_COND_WAIT:
431 case PIPE_RENDER_COND_BY_REGION_WAIT:
432 wait = true;
433 break;
434 case PIPE_RENDER_COND_NO_WAIT:
435 case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
436 default:
437 wait = false;
438 break;
439 }
440
441 if (pipe->get_query_result(pipe, hw3d->render_condition.query,
442 wait, (union pipe_query_result *) &result)) {
443 return (result > 0);
444 }
445 else {
446 return true;
447 }
448 }
449
/* clamp-style helpers used while tracking per-sub-primitive index ranges */
#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
 *
 * Scan the index buffer and split a draw that uses primitive restart into
 * restart-free sub-draws.  \p info must point to storage for the worst case
 * of (count + 1) / 2 pipe_draw_info entries.  Returns the number of
 * sub-primitives written to \p info.
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

/* Walk indices [start, count); every restart index terminates the current
 * sub-primitive and opens a new one.  min_index/max_index of each
 * sub-primitive are narrowed from the copied orig_info values as we go.
 */
#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}
520
521 static inline bool
522 ilo_check_restart_index(struct ilo_context *ilo,
523 const struct pipe_draw_info *info)
524 {
525 /*
526 * Haswell (GEN(7.5)) supports an arbitrary cut index, check everything
527 * older.
528 */
529 if (ilo->dev->gen >= ILO_GEN(7.5))
530 return true;
531
532 /* Note: indices must be unsigned byte, unsigned short or unsigned int */
533 switch (ilo->index_buffer.index_size) {
534 case 1:
535 return ((info->restart_index & 0xff) == 0xff);
536 break;
537 case 2:
538 return ((info->restart_index & 0xffff) == 0xffff);
539 break;
540 case 4:
541 return (info->restart_index == 0xffffffff);
542 break;
543 }
544 return false;
545 }
546
547 static inline bool
548 ilo_check_restart_prim_type(struct ilo_context *ilo,
549 const struct pipe_draw_info *info)
550 {
551 switch (info->mode) {
552 case PIPE_PRIM_POINTS:
553 case PIPE_PRIM_LINES:
554 case PIPE_PRIM_LINE_STRIP:
555 case PIPE_PRIM_TRIANGLES:
556 case PIPE_PRIM_TRIANGLE_STRIP:
557 /* All 965 GEN graphics support a cut index for these primitive types */
558 return true;
559 break;
560
561 case PIPE_PRIM_LINE_LOOP:
562 case PIPE_PRIM_POLYGON:
563 case PIPE_PRIM_QUAD_STRIP:
564 case PIPE_PRIM_QUADS:
565 case PIPE_PRIM_TRIANGLE_FAN:
566 if (ilo->dev->gen >= ILO_GEN(7.5)) {
567 /* Haswell and newer parts can handle these prim types. */
568 return true;
569 }
570 break;
571 }
572
573 return false;
574 }
575
576 /*
577 * Handle VBOs using primitive restart.
578 * Verify that restart index and primitive type can be handled by the HW.
579 * Return true if this routine did the rendering
580 * Return false if this routine did NOT render because restart can be handled
581 * in HW.
582 */
583 static void
584 ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
585 const struct pipe_draw_info *info)
586 {
587 struct ilo_context *ilo = ilo_context(pipe);
588 struct pipe_draw_info *restart_info = NULL;
589 int sub_prim_count = 1;
590
591 /*
592 * We have to break up the primitive into chunks manually
593 * Worst case, every other index could be a restart index so
594 * need to have space for that many primitives
595 */
596 restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
597 if (NULL == restart_info) {
598 /* If we can't get memory for this, bail out */
599 ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
600 return;
601 }
602
603 struct pipe_transfer *transfer = NULL;
604 const void *map = NULL;
605 map = pipe_buffer_map(pipe,
606 ilo->index_buffer.buffer,
607 PIPE_TRANSFER_READ,
608 &transfer);
609
610 sub_prim_count = ilo_find_sub_primitives(map + ilo->index_buffer.offset,
611 ilo->index_buffer.index_size,
612 info,
613 restart_info);
614
615 pipe_buffer_unmap(pipe, transfer);
616
617 info = restart_info;
618
619 while (sub_prim_count > 0) {
620 pipe->draw_vbo(pipe, info);
621
622 sub_prim_count--;
623 info++;
624 }
625
626 FREE(restart_info);
627 }
628
/* pipe_context::draw_vbo entry point. */
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   /* conditional rendering: skip the draw when the condition fails */
   if (!pass_render_condition(hw3d, pipe))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * Want to draw an indexed primitive using primitive restart
       * Check that HW can handle the request and fall to SW if not.
       */
      if (!ilo_check_restart_index(ilo, info) ||
          !ilo_check_restart_prim_type(ilo, info)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   /* assume the cache is still in use by the previous batch */
   if (hw3d->new_batch)
      ilo_shader_cache_mark_busy(ilo->shader_cache);

   ilo_finalize_states(ilo);

   /* the shaders may be uploaded to a new shader cache */
   if (hw3d->shader_cache_seqno != ilo->shader_cache->seqno) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   /*
    * The VBs and/or IB may have different BOs due to being mapped with
    * PIPE_TRANSFER_DISCARD_x.  We should track that instead of setting the
    * dirty flags for the performance reason.
    */
   ilo->dirty |= ILO_DIRTY_VERTEX_BUFFERS | ILO_DIRTY_INDEX_BUFFER;

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, info, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;
   hw3d->shader_cache_seqno = ilo->shader_cache->seqno;

   /* feed the software-counted PRIMITIVES_GENERATED/EMITTED queries */
   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}
684
685 static void
686 ilo_render_condition(struct pipe_context *pipe,
687 struct pipe_query *query,
688 uint mode)
689 {
690 struct ilo_context *ilo = ilo_context(pipe);
691 struct ilo_3d *hw3d = ilo->hw3d;
692
693 /* reference count? */
694 hw3d->render_condition.query = query;
695 hw3d->render_condition.mode = mode;
696 }
697
/* pipe_context::texture_barrier entry point. */
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* only the render ring has anything to flush here */
   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp);
}
713
714 static void
715 ilo_get_sample_position(struct pipe_context *pipe,
716 unsigned sample_count,
717 unsigned sample_index,
718 float *out_value)
719 {
720 struct ilo_context *ilo = ilo_context(pipe);
721 struct ilo_3d *hw3d = ilo->hw3d;
722
723 ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
724 sample_count, sample_index,
725 &out_value[0], &out_value[1]);
726 }
727
/**
 * Initialize 3D-related functions.
 *
 * Installs the draw/query-condition/barrier/sample-position entry points
 * into the pipe_context vtable.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}
738 }