/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "util/u_prim.h"
#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

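/**
 * Process the bo of an occlusion query.  The bo stores depth counts in
 * pairs: one value written when the query began or resumed, and one when it
 * was paused or ended.  Sum the delta of each pair into the query result.
 */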
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

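/**
 * Convert a raw timestamp to nanoseconds.  Only the lower 32 bits are
 * used, and each tick is 80 nanoseconds.
 */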
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

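/**
 * Process the bo of a timestamp query.  The bo stores a single raw
 * timestamp; convert it to nanoseconds for the query result.
 */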
static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   timestamp = vals[0];
   intel_bo_unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

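/**
 * Process the bo of a time-elapsed query.  As with occlusion queries, the
 * bo stores timestamps in begin/end pairs; accumulate the deltas, converted
 * to nanoseconds, into the query result.
 */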
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

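/**
 * Resume the queries that were paused by the last batch flush or loss of
 * the render ring, emitting a new starting register write for each of
 * them.  When a query bo is already full, process it first so that it can
 * be reused.
 */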
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

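/**
 * Pause all active hardware queries by emitting the closing register write
 * of each pair, so that partial results are preserved across batch buffers.
 */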
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

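/**
 * Callback for losing the ownership of the render ring.  Pause the active
 * queries so that commands from the new owner are not counted.
 */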
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}

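/**
 * Switch the CP to the render ring and take its ownership.  The active
 * queries are resumed only when the ownership actually changes hands.
 */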
static void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);

   if (hw3d->kernel.bo)
      intel_bo_unreference(hw3d->kernel.bo);

   FREE(hw3d);
}

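/**
 * Emit the commands for a draw to the CP.  A manual flush is inserted when
 * the framebuffer or the SO targets changed in the middle of a batch, and
 * the batch itself is flushed first when it lacks the space for the
 * commands.
 */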
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes,
       * we have to assume that the old framebuffer may be sampled from.  If
       * that happens in the middle of a batch buffer, we need to insert
       * manual flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp);
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}

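/**
 * Add the primitive counts of the last draw to the active
 * PIPE_QUERY_PRIMITIVES_GENERATED and PIPE_QUERY_PRIMITIVES_EMITTED
 * queries.
 */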
static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

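/**
 * Return true when rendering may proceed: either there is no render
 * condition, the condition is satisfied, or its result is not yet
 * available without waiting.
 */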
bool
ilo_3d_pass_render_condition(struct ilo_context *ilo)
{
   struct ilo_3d *hw3d = ilo->hw3d;
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result))
      return (!result == hw3d->render_condition.cond);
   else
      return true;
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

static inline bool
ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
{
   /*
    * Haswell (GEN(7.5)) supports an arbitrary cut index; everything older
    * requires the fixed all-ones index and must be checked.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->ib.index_size) {
   case 1:
      return ((restart_index & 0xff) == 0xff);
      break;
   case 2:
      return ((restart_index & 0xffff) == 0xffff);
      break;
   case 4:
      return (restart_index == 0xffffffff);
      break;
   }
   return false;
}

static inline bool
ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* All 965 GEN graphics support a cut index for these primitive types */
      return true;
      break;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types. */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Handle primitive restart in software.  The caller has already verified
 * that the hardware cannot handle the requested restart index or primitive
 * type, so split the draw into sub-primitives at the restart indices and
 * draw each of them separately.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  In the worst
    * case, every other index could be a restart index, so we need space for
    * that many sub-primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
            ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

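/**
 * Upload the shaders in the shader cache to the kernel bo, growing and
 * replacing the bo when it is too small or may still be in use by the
 * previous batch.  Return false on allocation or upload failure.
 */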
static bool
upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
{
   bool incremental = true;
   int upload;

   upload = ilo_shader_cache_upload(shc,
         NULL, hw3d->kernel.used, incremental);
   if (!upload)
      return true;

   /*
    * Allocate a new bo.  When this is a new batch, assume the bo is still
    * in use by the previous batch and force allocation.
    *
    * Would it help to make the shader cache upload use unsynchronized
    * mapping, and remove the check for a new batch here?
    */
   if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
      unsigned new_size = (hw3d->kernel.size) ?
         hw3d->kernel.size : (8 * 1024);

      while (hw3d->kernel.used + upload > new_size)
         new_size *= 2;

      if (hw3d->kernel.bo)
         intel_bo_unreference(hw3d->kernel.bo);

      hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
            "kernel bo", new_size, 0);
      if (!hw3d->kernel.bo) {
         ilo_err("failed to allocate kernel bo\n");
         return false;
      }

      hw3d->kernel.used = 0;
      hw3d->kernel.size = new_size;
      incremental = false;

      assert(new_size >= ilo_shader_cache_upload(shc,
            NULL, hw3d->kernel.used, incremental));

      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   upload = ilo_shader_cache_upload(shc,
         hw3d->kernel.bo, hw3d->kernel.used, incremental);
   if (upload < 0) {
      ilo_err("failed to upload shaders\n");
      return false;
   }

   hw3d->kernel.used += upload;

   assert(hw3d->kernel.used <= hw3d->kernel.size);

   return true;
}

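/**
 * The pipe_context::draw_vbo() hook.  Check the render condition, fall
 * back to software primitive restart when the hardware cannot handle it,
 * finalize the states, upload the shaders, and emit the draw.
 */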
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_dump_dirty_flags(ilo->dirty);
   }

   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * This is an indexed draw using primitive restart.  Check that the
       * HW can handle the request and fall back to SW restart if not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   if (!upload_shaders(hw3d, ilo->shader_cache))
      return;

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}

static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp);
}

static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}