ilo: resolve Z/HiZ correctly
[mesa.git] src/gallium/drivers/ilo/ilo_3d.c
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "util/u_prim.h"
#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_blit.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

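/**
 * Accumulate the pairs of depth counts written so far into the query
 * result and reset the write position, so that the query can be resumed.
 */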
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

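/**
 * Convert a raw hardware timestamp to nanoseconds.  Only the lower 32 bits
 * are used, with each tick taken as 80ns (see ilo_get_timestamp()).
 */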
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

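/**
 * Read back the single timestamp written for the query and store it, in
 * nanoseconds, as the query result.
 */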
static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   timestamp = vals[0];
   intel_bo_unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

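/**
 * Accumulate the pairs of timestamps written so far into the query result,
 * converted to nanoseconds, and reset the write position.
 */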
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

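/**
 * Emit the commands that resume all paused queries, processing any query
 * whose bo has filled up along the way.
 */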
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

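/**
 * Emit the commands that pause all active queries before the render ring
 * is released.
 */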
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

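/**
 * Callback invoked when another user takes over the render ring: pause the
 * active queries so that they can be resumed later.
 */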
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}

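/**
 * Switch the command parser to the render ring and claim ownership of it,
 * resuming the paused queries when the ownership actually changed hands.
 */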
void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);

   if (hw3d->kernel.bo)
      intel_bo_unreference(hw3d->kernel.bo);

   FREE(hw3d);
}

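/**
 * Emit the draw commands, preceded by a manual flush when one is needed,
 * flushing the batch first if there is not enough space left for both.
 */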
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes,
       * we have to assume that the old framebuffer may be sampled from.  If
       * that happens in the middle of a batch buffer, we need to insert
       * manual flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}

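/**
 * Add the primitive counts from the last draw to all active
 * PRIMITIVES_GENERATED and PRIMITIVES_EMITTED queries.
 */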
static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

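/**
 * Return true if rendering should proceed under the current render
 * condition, or when no render condition is set.
 */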
bool
ilo_3d_pass_render_condition(struct ilo_context *ilo)
{
   struct ilo_3d *hw3d = ilo->hw3d;
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result))
      return (!result == hw3d->render_condition.cond);
   else
      return true;
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

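/**
 * Return true if the hardware can handle the given restart index.  Haswell
 * accepts any value; older hardware requires the all-ones index for the
 * index size in use.
 */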
static inline bool
ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
{
   /*
    * Haswell (GEN(7.5)) supports an arbitrary cut index; everything older
    * must be checked.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->ib.index_size) {
   case 1:
      return ((restart_index & 0xff) == 0xff);
   case 2:
      return ((restart_index & 0xffff) == 0xffff);
   case 4:
      return (restart_index == 0xffffffff);
   }

   return false;
}

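/**
 * Return true if the hardware can handle primitive restart for the given
 * primitive type.
 */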
static inline bool
ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* All 965 GEN graphics support a cut index for these primitive types */
      return true;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types. */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Break an indexed primitive that uses primitive restart into
 * sub-primitives in software and draw them one by one.  This is used when
 * the restart index or the primitive type cannot be handled by the HW.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  In the worst
    * case, every other index could be a restart index, so we need to have
    * space for that many primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count =
         ilo_find_sub_primitives((const char *) map + ilo->ib.offset,
               ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

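/**
 * Upload the cached shaders to the kernel bo, growing (and reallocating)
 * the bo when it is too small or may still be in use by the last batch.
 */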
static bool
upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
{
   bool incremental = true;
   int upload;

   upload = ilo_shader_cache_upload(shc,
         NULL, hw3d->kernel.used, incremental);
   if (!upload)
      return true;

   /*
    * Allocate a new bo.  When this is a new batch, assume the bo is still
    * in use by the previous batch and force allocation.
    *
    * Would it help to upload the shader cache with unsynchronized mapping,
    * and remove the check for a new batch here?
    */
   if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
      unsigned new_size = (hw3d->kernel.size) ?
         hw3d->kernel.size : (8 * 1024);

      while (hw3d->kernel.used + upload > new_size)
         new_size *= 2;

      if (hw3d->kernel.bo)
         intel_bo_unreference(hw3d->kernel.bo);

      hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
            "kernel bo", new_size, 0);
      if (!hw3d->kernel.bo) {
         ilo_err("failed to allocate kernel bo\n");
         return false;
      }

      hw3d->kernel.used = 0;
      hw3d->kernel.size = new_size;
      incremental = false;

      assert(new_size >= ilo_shader_cache_upload(shc,
            NULL, hw3d->kernel.used, incremental));

      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   upload = ilo_shader_cache_upload(shc,
         hw3d->kernel.bo, hw3d->kernel.used, incremental);
   if (upload < 0) {
      ilo_err("failed to upload shaders\n");
      return false;
   }

   hw3d->kernel.used += upload;

   assert(hw3d->kernel.used <= hw3d->kernel.size);

   return true;
}

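/**
 * Implement pipe_context::draw_vbo, falling back to software primitive
 * restart when the hardware cannot handle the request.
 */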
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_dump_dirty_flags(ilo->dirty);
   }

   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to SW if
       * not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   if (!upload_shaders(hw3d, ilo->shader_cache))
      return;

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

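/**
 * Implement pipe_context::render_condition by recording the query, mode,
 * and condition for later evaluation in ilo_3d_pass_render_condition().
 */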
static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}

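/**
 * Implement pipe_context::texture_barrier by emitting a pipeline flush.
 */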
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp, "texture barrier");
}

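/**
 * Implement pipe_context::get_sample_position by querying the pipeline for
 * the position of the given sample within the pixel.
 */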
static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}