ilo: rename ilo_cp_flush()
[mesa.git] / src / gallium / drivers / ilo / ilo_3d.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2012-2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_prim.h"
29 #include "intel_winsys.h"
30
31 #include "ilo_3d_pipeline.h"
32 #include "ilo_blit.h"
33 #include "ilo_context.h"
34 #include "ilo_cp.h"
35 #include "ilo_query.h"
36 #include "ilo_shader.h"
37 #include "ilo_state.h"
38 #include "ilo_3d.h"
39
40 static void
41 process_query_for_occlusion_counter(struct ilo_3d *hw3d,
42 struct ilo_query *q)
43 {
44 uint64_t *vals, depth_count = 0;
45 int i;
46
47 /* in pairs */
48 assert(q->reg_read % 2 == 0);
49
50 vals = intel_bo_map(q->bo, false);
51 for (i = 1; i < q->reg_read; i += 2)
52 depth_count += vals[i] - vals[i - 1];
53 intel_bo_unmap(q->bo);
54
55 /* accumulate so that the query can be resumed if wanted */
56 q->data.u64 += depth_count;
57 q->reg_read = 0;
58 }
59
/*
 * Convert a raw GPU timestamp to nanoseconds.  Only the low 32 bits of the
 * counter are meaningful (see ilo_get_timestamp()); each tick is 80ns.
 */
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   const uint64_t ns_per_tick = 80;

   return (timestamp & 0xffffffff) * ns_per_tick;
}
66
67 static void
68 process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
69 {
70 uint64_t *vals, timestamp;
71
72 assert(q->reg_read == 1);
73
74 vals = intel_bo_map(q->bo, false);
75 timestamp = vals[0];
76 intel_bo_unmap(q->bo);
77
78 q->data.u64 = timestamp_to_ns(timestamp);
79 q->reg_read = 0;
80 }
81
82 static void
83 process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
84 {
85 uint64_t *vals, elapsed = 0;
86 int i;
87
88 /* in pairs */
89 assert(q->reg_read % 2 == 0);
90
91 vals = intel_bo_map(q->bo, false);
92
93 for (i = 1; i < q->reg_read; i += 2)
94 elapsed += vals[i] - vals[i - 1];
95
96 intel_bo_unmap(q->bo);
97
98 /* accumulate so that the query can be resumed if wanted */
99 q->data.u64 += timestamp_to_ns(elapsed);
100 q->reg_read = 0;
101 }
102
103 static void
104 process_query_for_pipeline_statistics(struct ilo_3d *hw3d,
105 struct ilo_query *q)
106 {
107 const uint64_t *vals;
108 int i;
109
110 assert(q->reg_read % 22 == 0);
111
112 vals = intel_bo_map(q->bo, false);
113
114 for (i = 0; i < q->reg_read; i += 22) {
115 struct pipe_query_data_pipeline_statistics *stats =
116 &q->data.pipeline_statistics;
117 const uint64_t *begin = vals + i;
118 const uint64_t *end = begin + 11;
119
120 stats->ia_vertices += end[0] - begin[0];
121 stats->ia_primitives += end[1] - begin[1];
122 stats->vs_invocations += end[2] - begin[2];
123 stats->gs_invocations += end[3] - begin[3];
124 stats->gs_primitives += end[4] - begin[4];
125 stats->c_invocations += end[5] - begin[5];
126 stats->c_primitives += end[6] - begin[6];
127 stats->ps_invocations += end[7] - begin[7];
128 stats->hs_invocations += end[8] - begin[8];
129 stats->ds_invocations += end[9] - begin[9];
130 stats->cs_invocations += end[10] - begin[10];
131 }
132
133 intel_bo_unmap(q->bo);
134
135 q->reg_read = 0;
136 }
137
/*
 * Emit a new "begin" snapshot for every active HW-backed query.  Called when
 * the 3D context regains ownership of the CP, so that counting continues in
 * the new batch.  If a query bo has no room left, its accumulated values are
 * processed first (which also resets the write position).
 */
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume pipeline statistics queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->pipeline_statistics_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_pipeline_statistics(hw3d, q);

      /* a statistics snapshot occupies 11 registers, not 1 */
      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
   }
}
174
/*
 * Emit an "end" snapshot for every active HW-backed query.  Called when the
 * 3D context is about to lose ownership of the CP.  Space for these writes
 * was reserved in ilo_3d_reserve_for_query(), so the bos are guaranteed to
 * have room (hence the asserts rather than overflow handling).
 */
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause pipeline statistics queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->pipeline_statistics_queries, list) {
      assert(q->reg_read < q->reg_total);
      /* a statistics snapshot occupies 11 registers, not 1 */
      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
   }
}
202
/*
 * Claim the render ring for the 3D context.  This registers hw3d->owner with
 * the CP so the own/release callbacks (query resume/pause) fire on ownership
 * changes.
 */
void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_owner(hw3d->cp, INTEL_RING_RENDER, &hw3d->owner);
}
208
/*
 * Make sure the batch has room for both the "begin" write of a query and a
 * later "pause" write, submitting the current batch if necessary.  The pause
 * portion is added to owner.reserve so it stays available until the query
 * ends (see ilo_3d_end_query(), which releases it).
 */
static void
ilo_3d_reserve_for_query(struct ilo_3d *hw3d, struct ilo_query *q,
                         enum ilo_3d_pipeline_action act)
{
   q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline, act, NULL);

   /* XXX we should check the aperture size */
   /* need room for the begin write now plus the pause write later */
   if (ilo_cp_space(hw3d->cp) < q->reg_cmd_size * 2) {
      ilo_cp_submit(hw3d->cp, "out of space");
      assert(ilo_cp_space(hw3d->cp) >= q->reg_cmd_size * 2);
   }

   /* reserve space for pausing the query */
   hw3d->owner.reserve += q->reg_cmd_size;
}
224
/**
 * Begin a query.
 *
 * For HW-backed query types this reserves batch space for the eventual
 * pause, allocates the result bo, emits the "begin" snapshot, and tracks the
 * query on the matching active list.  CPU-counted types just zero their
 * accumulator.  On bo allocation failure the query is silently not tracked.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_DEPTH_COUNT);
      q->data.u64 = 0;

      /* 2 registers per begin/end pair */
      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop; the timestamp is captured at end-query time */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_TIMESTAMP);
      q->data.u64 = 0;

      /* 2 registers per begin/end pair */
      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* counted on the CPU in update_prim_count(); no bo needed */
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* counted on the CPU in update_prim_count(); no bo needed */
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_STATISTICS);
      memset(&q->data.pipeline_statistics, 0,
            sizeof(q->data.pipeline_statistics));

      /* 11 counters per snapshot, in begin/end pairs */
      if (ilo_query_alloc_bo(q, 11 * 2, -1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
               q->bo, q->reg_read);
         q->reg_read += 11;

         list_add(&q->list, &hw3d->pipeline_statistics_queries);
      }
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
287
/**
 * End a query.
 *
 * HW-backed queries are removed from their active list, their pause-space
 * reservation (made in ilo_3d_reserve_for_query()) is released, and the
 * final "end" snapshot is emitted.  PIPE_QUERY_TIMESTAMP allocates its bo
 * here since it has no begin phase.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      assert(hw3d->owner.reserve >= q->reg_cmd_size);
      hw3d->owner.reserve -= q->reg_cmd_size;

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      /* a single register is enough; written only at end-query */
      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      assert(hw3d->owner.reserve >= q->reg_cmd_size);
      hw3d->owner.reserve -= q->reg_cmd_size;

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* CPU-counted; nothing to emit */
      list_del(&q->list);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      list_del(&q->list);

      /* a statistics snapshot occupies 11 registers */
      assert(q->reg_read + 11 <= q->reg_total);
      assert(hw3d->owner.reserve >= q->reg_cmd_size);
      hw3d->owner.reserve -= q->reg_cmd_size;

      ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
      q->reg_read += 11;
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}
347
348 /**
349 * Process the raw query data.
350 */
351 void
352 ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
353 {
354 struct ilo_3d *hw3d = ilo->hw3d;
355
356 switch (q->type) {
357 case PIPE_QUERY_OCCLUSION_COUNTER:
358 if (q->bo)
359 process_query_for_occlusion_counter(hw3d, q);
360 break;
361 case PIPE_QUERY_TIMESTAMP:
362 if (q->bo)
363 process_query_for_timestamp(hw3d, q);
364 break;
365 case PIPE_QUERY_TIME_ELAPSED:
366 if (q->bo)
367 process_query_for_time_elapsed(hw3d, q);
368 break;
369 case PIPE_QUERY_PRIMITIVES_GENERATED:
370 case PIPE_QUERY_PRIMITIVES_EMITTED:
371 break;
372 case PIPE_QUERY_PIPELINE_STATISTICS:
373 if (q->bo)
374 process_query_for_pipeline_statistics(hw3d, q);
375 break;
376 default:
377 assert(!"unknown query type");
378 break;
379 }
380 }
381
/**
 * Hook for CP new-batch.
 *
 * Called after a batch is submitted: optionally decode the batch for
 * debugging, then invalidate pipeline state that referenced the old batch,
 * state, and kernel bos.
 */
void
ilo_3d_cp_submitted(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_builder_decode(&hw3d->cp->builder);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO |
         ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);

   hw3d->new_batch = true;
}
399
/* CP ownership gained: re-emit "begin" snapshots for all active queries. */
static void
ilo_3d_own_cp(struct ilo_cp *cp, void *data)
{
   ilo_3d_resume_queries((struct ilo_3d *) data);
}
407
/* CP ownership lost: emit "end" snapshots for all active queries. */
static void
ilo_3d_release_cp(struct ilo_cp *cp, void *data)
{
   ilo_3d_pause_queries((struct ilo_3d *) data);
}
415
416 /**
417 * Create a 3D context.
418 */
419 struct ilo_3d *
420 ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
421 {
422 struct ilo_3d *hw3d;
423
424 hw3d = CALLOC_STRUCT(ilo_3d);
425 if (!hw3d)
426 return NULL;
427
428 hw3d->cp = cp;
429 hw3d->owner.own = ilo_3d_own_cp;
430 hw3d->owner.release = ilo_3d_release_cp;
431 hw3d->owner.data = hw3d;
432 hw3d->owner.reserve = 0;
433
434 hw3d->new_batch = true;
435
436 list_inithead(&hw3d->occlusion_queries);
437 list_inithead(&hw3d->time_elapsed_queries);
438 list_inithead(&hw3d->prim_generated_queries);
439 list_inithead(&hw3d->prim_emitted_queries);
440 list_inithead(&hw3d->pipeline_statistics_queries);
441
442 hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
443 if (!hw3d->pipeline) {
444 FREE(hw3d);
445 return NULL;
446 }
447
448 return hw3d;
449 }
450
/**
 * Destroy a 3D context.
 *
 * Frees the pipeline and the context itself.  Any still-active queries are
 * not touched here; they are expected to have been ended already.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);
   FREE(hw3d);
}
460
/*
 * Emit a draw, inserting a manual flush first when the framebuffer or SO
 * targets changed mid-batch.  Returns false if the pipeline failed to emit
 * the draw.  prim_generated/prim_emitted receive the primitive counts of
 * this draw for CPU-side query accounting.
 */
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* same to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_submit(hw3d->cp, "out of space");
      /* submitting started a fresh batch, so the manual flush is moot */
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}
503
504 static void
505 update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
506 {
507 struct ilo_query *q;
508
509 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
510 q->data.u64 += generated;
511
512 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
513 q->data.u64 += emitted;
514 }
515
516 bool
517 ilo_3d_pass_render_condition(struct ilo_context *ilo)
518 {
519 struct ilo_3d *hw3d = ilo->hw3d;
520 uint64_t result;
521 bool wait;
522
523 if (!hw3d->render_condition.query)
524 return true;
525
526 switch (hw3d->render_condition.mode) {
527 case PIPE_RENDER_COND_WAIT:
528 case PIPE_RENDER_COND_BY_REGION_WAIT:
529 wait = true;
530 break;
531 case PIPE_RENDER_COND_NO_WAIT:
532 case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
533 default:
534 wait = false;
535 break;
536 }
537
538 if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
539 wait, (union pipe_query_result *) &result))
540 return (!result == hw3d->render_condition.cond);
541 else
542 return true;
543 }
544
/* in-place min/max update; note (a) and (b) are each evaluated twice */
#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))
547
/**
 * Split an indexed draw that uses primitive restart into sub-primitives.
 *
 * Scans the index buffer \p elements (with indices of \p element_size bytes)
 * for orig_info->restart_index and writes one pipe_draw_info per restart-free
 * run into \p info, with primitive_restart cleared and min/max_index
 * tightened.  Returns the number of sub-primitives written.  \p info must
 * have room for the worst case of (count + 1) / 2 entries.
 *
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   /* instantiate the scanner for the actual index size */
   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}
615
616 static inline bool
617 ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
618 {
619 /*
620 * Haswell (GEN(7.5)) supports an arbitrary cut index, check everything
621 * older.
622 */
623 if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7.5))
624 return true;
625
626 /* Note: indices must be unsigned byte, unsigned short or unsigned int */
627 switch (ilo->ib.index_size) {
628 case 1:
629 return ((restart_index & 0xff) == 0xff);
630 break;
631 case 2:
632 return ((restart_index & 0xffff) == 0xffff);
633 break;
634 case 4:
635 return (restart_index == 0xffffffff);
636 break;
637 }
638 return false;
639 }
640
641 static inline bool
642 ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
643 {
644 switch (prim) {
645 case PIPE_PRIM_POINTS:
646 case PIPE_PRIM_LINES:
647 case PIPE_PRIM_LINE_STRIP:
648 case PIPE_PRIM_TRIANGLES:
649 case PIPE_PRIM_TRIANGLE_STRIP:
650 /* All 965 GEN graphics support a cut index for these primitive types */
651 return true;
652 break;
653
654 case PIPE_PRIM_LINE_LOOP:
655 case PIPE_PRIM_POLYGON:
656 case PIPE_PRIM_QUAD_STRIP:
657 case PIPE_PRIM_QUADS:
658 case PIPE_PRIM_TRIANGLE_FAN:
659 if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7.5)) {
660 /* Haswell and newer parts can handle these prim types. */
661 return true;
662 }
663 break;
664 }
665
666 return false;
667 }
668
/*
 * Handle an indexed draw that uses primitive restart when the restart index
 * or primitive type cannot be handled by the HW: split the draw into
 * restart-free sub-primitives on the CPU and issue one draw_vbo() per
 * sub-primitive.  (The old comment claimed a bool return; this function is
 * void and simply bails out on allocation failure.)
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually
    * Worst case, every other index could be a restart index so
    * need to have space for that many primitives
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
            ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      /* user-pointer index buffer; no mapping needed */
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   /* issue one draw per sub-primitive */
   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}
724
/*
 * pipe_context::draw_vbo entry point.  Falls back to SW primitive-restart
 * splitting when the HW cannot handle the restart index/prim type, finalizes
 * state, uploads shaders, resolves the framebuffer, and emits the draw.
 */
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_dump_dirty_flags(ilo->dirty);
   }

   /* skip entirely when the render condition says so */
   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * Want to draw an indexed primitive using primitive restart
       * Check that HW can handle the request and fall to SW if not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   ilo_shader_cache_upload(ilo->shader_cache, &hw3d->cp->builder);

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}
784
/*
 * pipe_context::render_condition entry point.  Only records the condition;
 * it is evaluated lazily in ilo_3d_pass_render_condition().
 */
static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}
799
/*
 * pipe_context::texture_barrier entry point.  Emits a pipeline flush so
 * prior rendering becomes visible to texture fetches; only meaningful on the
 * render ring.
 */
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_submit(hw3d->cp, "texture barrier");
}
815
816 static void
817 ilo_get_sample_position(struct pipe_context *pipe,
818 unsigned sample_count,
819 unsigned sample_index,
820 float *out_value)
821 {
822 struct ilo_context *ilo = ilo_context(pipe);
823 struct ilo_3d *hw3d = ilo->hw3d;
824
825 ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
826 sample_count, sample_index,
827 &out_value[0], &out_value[1]);
828 }
829
/**
 * Initialize 3D-related functions.
 *
 * Installs the draw/render-condition/barrier/sample-position hooks into the
 * pipe_context vtable.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}