ilo: use an accessor for dev->gen
[mesa.git] / src / gallium / drivers / ilo / ilo_3d.c
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "util/u_prim.h"
#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_blit.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

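/*
 * An occlusion query collects depth-count snapshots in pairs: one when the
 * query begins or resumes and one when it pauses or ends.  The result is
 * the sum of the differences within each pair.
 */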
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   vals = intel_bo_map(q->bo, false);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

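/*
 * Convert a timestamp tick count to nanoseconds.  The GPU timestamp counter
 * ticks once every 80 ns on the GENs ilo supports; see ilo_get_timestamp()
 * for why only the low 32 bits are considered.
 */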
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   vals = intel_bo_map(q->bo, false);
   timestamp = vals[0];
   intel_bo_unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   vals = intel_bo_map(q->bo, false);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

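/*
 * A pipeline statistics query records the eleven statistics registers in a
 * begin snapshot followed by an end snapshot, so each reading occupies 22
 * qwords in the bo.  Each counter advances by (end - begin) of its slot.
 */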
static void
process_query_for_pipeline_statistics(struct ilo_3d *hw3d,
                                      struct ilo_query *q)
{
   const uint64_t *vals;
   int i;

   assert(q->reg_read % 22 == 0);

   vals = intel_bo_map(q->bo, false);

   for (i = 0; i < q->reg_read; i += 22) {
      struct pipe_query_data_pipeline_statistics *stats =
         &q->data.pipeline_statistics;
      const uint64_t *begin = vals + i;
      const uint64_t *end = begin + 11;

      stats->ia_vertices += end[0] - begin[0];
      stats->ia_primitives += end[1] - begin[1];
      stats->vs_invocations += end[2] - begin[2];
      stats->gs_invocations += end[3] - begin[3];
      stats->gs_primitives += end[4] - begin[4];
      stats->c_invocations += end[5] - begin[5];
      stats->c_primitives += end[6] - begin[6];
      stats->ps_invocations += end[7] - begin[7];
      stats->hs_invocations += end[8] - begin[8];
      stats->ds_invocations += end[9] - begin[9];
      stats->cs_invocations += end[10] - begin[10];
   }

   intel_bo_unmap(q->bo);

   q->reg_read = 0;
}

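/*
 * Hardware queries are valid only while this context owns the render ring.
 * When ownership is lost, the active queries are paused (an end snapshot is
 * written); when it is regained, they are resumed (a new begin snapshot is
 * written).  Results accumulated so far are folded into q->data whenever a
 * bo runs out of snapshot space.
 */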
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume pipeline statistics queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->pipeline_statistics_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_pipeline_statistics(hw3d, q);

      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
   }
}

static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause pipeline statistics queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->pipeline_statistics_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
   }
}

static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}

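/*
 * Claim the render ring for 3D use.  Enough batch space (owner_reserve) is
 * kept in reserve so that the pause writes emitted by the release callback
 * above always fit when another owner takes over the ring.
 */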
void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, INTEL_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
            ilo_cp_flush(hw3d->cp, "out of space");
            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
         }

         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
            ilo_cp_flush(hw3d->cp, "out of space");
            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
         }

         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_STATISTICS, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      memset(&q->data.pipeline_statistics, 0,
             sizeof(q->data.pipeline_statistics));

      if (ilo_query_alloc_bo(q, 11 * 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
            ilo_cp_flush(hw3d->cp, "out of space");
            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
         }

         ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
               q->bo, q->reg_read);
         q->reg_read += 11;

         list_add(&q->list, &hw3d->pipeline_statistics_queries);
      }
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      list_del(&q->list);

      assert(q->reg_read + 11 <= q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      if (q->bo)
         process_query_for_pipeline_statistics(hw3d, q);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_builder_decode(&hw3d->cp->builder);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO |
         ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);

   hw3d->new_batch = true;
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);
   list_inithead(&hw3d->pipeline_statistics_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);
   FREE(hw3d);
}

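/*
 * Emit all state and the draw commands for the current draw, flushing first
 * when the batch may still reference the old framebuffer or SO targets.
 * The counts of primitives generated and emitted are returned so that
 * software-tracked queries can be updated.
 */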
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes,
       * we have to assume that the old framebuffer may be sampled from.  If
       * that happens in the middle of a batch buffer, we need to insert
       * manual flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}

static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

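/*
 * Check the current render condition.  Rendering proceeds when the boolean
 * query result differs from the condition flag, or when the result is not
 * yet available and the mode says not to wait.
 */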
bool
ilo_3d_pass_render_condition(struct ilo_context *ilo)
{
   struct ilo_3d *hw3d = ilo->hw3d;
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result))
      return (!result == hw3d->render_condition.cond);
   else
      return true;
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
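 *
 * For example, with restart_index 0xffff and index data
 * { 0, 1, 2, 0xffff, 3, 4, 5 }, the draw is split into two sub-draws,
 * one with start 0 and count 3 and one with start 4 and count 3.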
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

static inline bool
ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
{
   /*
    * Haswell (GEN(7.5)) supports an arbitrary cut index; everything older
    * only supports the all-ones value for the index size in use.
    */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->ib.index_size) {
   case 1:
      return ((restart_index & 0xff) == 0xff);
      break;
   case 2:
      return ((restart_index & 0xffff) == 0xffff);
      break;
   case 4:
      return (restart_index == 0xffffffff);
      break;
   }
   return false;
}

static inline bool
ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* All 965 GEN graphics support a cut index for these primitive types */
      return true;
      break;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types. */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Handle an indexed draw that uses primitive restart in software: split the
 * draw into sub-primitives at the restart indices and draw each chunk
 * separately.  This path is taken when the hardware cannot handle the
 * restart index or the primitive type.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  In the worst
    * case, every other index could be a restart index, so allocate space
    * for that many sub-primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
            ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_dump_dirty_flags(ilo->dirty);
   }

   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to a
       * software path if not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   ilo_shader_cache_upload(ilo->shader_cache, &hw3d->cp->builder);

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}

static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp, "texture barrier");
}

static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}