src/gallium/drivers/ilo/ilo_3d.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2012-2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_prim.h"
29 #include "intel_winsys.h"
30
31 #include "ilo_3d_pipeline.h"
32 #include "ilo_blit.h"
33 #include "ilo_context.h"
34 #include "ilo_cp.h"
35 #include "ilo_query.h"
36 #include "ilo_shader.h"
37 #include "ilo_state.h"
38 #include "ilo_3d.h"
39
40 static void
41 process_query_for_occlusion_counter(struct ilo_3d *hw3d,
42 struct ilo_query *q)
43 {
44 uint64_t *vals, depth_count = 0;
45 int i;
46
47 /* in pairs */
48 assert(q->reg_read % 2 == 0);
49
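/*
 * Even slots hold the depth count written when the query was begun or
 * resumed, odd slots the count written when it was paused or ended; each
 * delta is the number of samples that passed during that interval.
 */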
50 vals = intel_bo_map(q->bo, false);
51 for (i = 1; i < q->reg_read; i += 2)
52 depth_count += vals[i] - vals[i - 1];
53 intel_bo_unmap(q->bo);
54
55 /* accumulate so that the query can be resumed if wanted */
56 q->data.u64 += depth_count;
57 q->reg_read = 0;
58 }
59
60 static uint64_t
61 timestamp_to_ns(uint64_t timestamp)
62 {
63 /* see ilo_get_timestamp() */
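/* each tick is counted as 80 ns; only the lower 32 bits of the counter are used */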
64 return (timestamp & 0xffffffff) * 80;
65 }
66
67 static void
68 process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
69 {
70 uint64_t *vals, timestamp;
71
72 assert(q->reg_read == 1);
73
74 vals = intel_bo_map(q->bo, false);
75 timestamp = vals[0];
76 intel_bo_unmap(q->bo);
77
78 q->data.u64 = timestamp_to_ns(timestamp);
79 q->reg_read = 0;
80 }
81
82 static void
83 process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
84 {
85 uint64_t *vals, elapsed = 0;
86 int i;
87
88 /* in pairs */
89 assert(q->reg_read % 2 == 0);
90
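/*
 * Even slots hold the timestamp written when the query was begun or resumed,
 * odd slots the one written when it was paused or ended; each delta is the
 * GPU time spent in that interval.
 */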
91 vals = intel_bo_map(q->bo, false);
92
93 for (i = 1; i < q->reg_read; i += 2)
94 elapsed += vals[i] - vals[i - 1];
95
96 intel_bo_unmap(q->bo);
97
98 /* accumulate so that the query can be resumed if wanted */
99 q->data.u64 += timestamp_to_ns(elapsed);
100 q->reg_read = 0;
101 }
102
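/*
 * Emit register writes to resume all in-flight queries.  This is called when
 * the context gains ownership of the render ring, so that whatever happened
 * to the HW counters while the ring was owned elsewhere is not attributed to
 * these queries.
 */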
103 static void
104 ilo_3d_resume_queries(struct ilo_3d *hw3d)
105 {
106 struct ilo_query *q;
107
108 /* resume occlusion queries */
109 LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
110 /* accumulate the result if the bo is already full */
111 if (q->reg_read >= q->reg_total)
112 process_query_for_occlusion_counter(hw3d, q);
113
114 ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
115 q->bo, q->reg_read++);
116 }
117
118 /* resume timer queries */
119 LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
120 /* accumulate the result if the bo is already full */
121 if (q->reg_read >= q->reg_total)
122 process_query_for_time_elapsed(hw3d, q);
123
124 ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
125 q->bo, q->reg_read++);
126 }
127 }
128
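/*
 * Emit register writes to pause all in-flight queries.  This is called right
 * before the render ring is released to another owner (see
 * ilo_3d_release_render_ring()).
 */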
129 static void
130 ilo_3d_pause_queries(struct ilo_3d *hw3d)
131 {
132 struct ilo_query *q;
133
134 /* pause occlusion queries */
135 LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
136 assert(q->reg_read < q->reg_total);
137 ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
138 q->bo, q->reg_read++);
139 }
140
141 /* pause timer queries */
142 LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
143 assert(q->reg_read < q->reg_total);
144 ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
145 q->bo, q->reg_read++);
146 }
147 }
148
149 static void
150 ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
151 {
152 struct ilo_3d *hw3d = data;
153
154 ilo_3d_pause_queries(hw3d);
155 }
156
157 void
158 ilo_3d_own_render_ring(struct ilo_3d *hw3d)
159 {
160 ilo_cp_set_ring(hw3d->cp, INTEL_RING_RENDER);
161
162 if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
163 ilo_3d_resume_queries(hw3d);
164 }
165
166 /**
167 * Begin a query.
168 */
169 void
170 ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
171 {
172 struct ilo_3d *hw3d = ilo->hw3d;
173
174 ilo_3d_own_render_ring(hw3d);
175
176 switch (q->type) {
177 case PIPE_QUERY_OCCLUSION_COUNTER:
178 /* reserve some space for pausing the query */
179 q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
180 ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
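/*
 * owner_reserve keeps enough space in the batch for the pause write that has
 * to be emitted when the render ring is released
 */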
181 hw3d->owner_reserve += q->reg_cmd_size;
182 ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
183
184 q->data.u64 = 0;
185
186 if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
187 /* XXX we should check the aperture size */
188 ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
189 q->bo, q->reg_read++);
190
191 list_add(&q->list, &hw3d->occlusion_queries);
192 }
193 break;
194 case PIPE_QUERY_TIMESTAMP:
195 /* nop */
196 break;
197 case PIPE_QUERY_TIME_ELAPSED:
198 /* reserve some space for pausing the query */
199 q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
200 ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
201 hw3d->owner_reserve += q->reg_cmd_size;
202 ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
203
204 q->data.u64 = 0;
205
206 if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
207 /* XXX we should check the aperture size */
208 ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
209 q->bo, q->reg_read++);
210
211 list_add(&q->list, &hw3d->time_elapsed_queries);
212 }
213 break;
214 case PIPE_QUERY_PRIMITIVES_GENERATED:
215 q->data.u64 = 0;
216 list_add(&q->list, &hw3d->prim_generated_queries);
217 break;
218 case PIPE_QUERY_PRIMITIVES_EMITTED:
219 q->data.u64 = 0;
220 list_add(&q->list, &hw3d->prim_emitted_queries);
221 break;
222 default:
223 assert(!"unknown query type");
224 break;
225 }
226 }
227
228 /**
229 * End a query.
230 */
231 void
232 ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
233 {
234 struct ilo_3d *hw3d = ilo->hw3d;
235
236 ilo_3d_own_render_ring(hw3d);
237
238 switch (q->type) {
239 case PIPE_QUERY_OCCLUSION_COUNTER:
240 list_del(&q->list);
241
242 assert(q->reg_read < q->reg_total);
243 hw3d->owner_reserve -= q->reg_cmd_size;
244 ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
245 ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
246 q->bo, q->reg_read++);
247 break;
248 case PIPE_QUERY_TIMESTAMP:
249 q->data.u64 = 0;
250
251 if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
252 ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
253 q->bo, q->reg_read++);
254 }
255 break;
256 case PIPE_QUERY_TIME_ELAPSED:
257 list_del(&q->list);
258
259 assert(q->reg_read < q->reg_total);
260 hw3d->owner_reserve -= q->reg_cmd_size;
261 ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
262 ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
263 q->bo, q->reg_read++);
264 break;
265 case PIPE_QUERY_PRIMITIVES_GENERATED:
266 case PIPE_QUERY_PRIMITIVES_EMITTED:
267 list_del(&q->list);
268 break;
269 default:
270 assert(!"unknown query type");
271 break;
272 }
273 }
274
275 /**
276 * Process the raw query data.
277 */
278 void
279 ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
280 {
281 struct ilo_3d *hw3d = ilo->hw3d;
282
283 switch (q->type) {
284 case PIPE_QUERY_OCCLUSION_COUNTER:
285 if (q->bo)
286 process_query_for_occlusion_counter(hw3d, q);
287 break;
288 case PIPE_QUERY_TIMESTAMP:
289 if (q->bo)
290 process_query_for_timestamp(hw3d, q);
291 break;
292 case PIPE_QUERY_TIME_ELAPSED:
293 if (q->bo)
294 process_query_for_time_elapsed(hw3d, q);
295 break;
296 case PIPE_QUERY_PRIMITIVES_GENERATED:
297 case PIPE_QUERY_PRIMITIVES_EMITTED:
298 break;
299 default:
300 assert(!"unknown query type");
301 break;
302 }
303 }
304
305 /**
306 * Hook for CP new-batch.
307 */
308 void
309 ilo_3d_cp_flushed(struct ilo_3d *hw3d)
310 {
311 if (ilo_debug & ILO_DEBUG_3D)
312 ilo_3d_pipeline_dump(hw3d->pipeline);
313
314 /* invalidate the pipeline */
315 ilo_3d_pipeline_invalidate(hw3d->pipeline,
316 ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
317 ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
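/*
 * without a logical render context, the GPU state does not persist across
 * batch buffers and the whole pipeline state must be re-emitted
 */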
318 if (!hw3d->cp->render_ctx) {
319 ilo_3d_pipeline_invalidate(hw3d->pipeline,
320 ILO_3D_PIPELINE_INVALIDATE_HW);
321 }
322
323 hw3d->new_batch = true;
324 }
325
326 /**
327 * Create a 3D context.
328 */
329 struct ilo_3d *
330 ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
331 {
332 struct ilo_3d *hw3d;
333
334 hw3d = CALLOC_STRUCT(ilo_3d);
335 if (!hw3d)
336 return NULL;
337
338 hw3d->cp = cp;
339 hw3d->owner.release_callback = ilo_3d_release_render_ring;
340 hw3d->owner.release_data = hw3d;
341
342 hw3d->new_batch = true;
343
344 list_inithead(&hw3d->occlusion_queries);
345 list_inithead(&hw3d->time_elapsed_queries);
346 list_inithead(&hw3d->prim_generated_queries);
347 list_inithead(&hw3d->prim_emitted_queries);
348
349 hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
350 if (!hw3d->pipeline) {
351 FREE(hw3d);
352 return NULL;
353 }
354
355 return hw3d;
356 }
357
358 /**
359 * Destroy a 3D context.
360 */
361 void
362 ilo_3d_destroy(struct ilo_3d *hw3d)
363 {
364 ilo_3d_pipeline_destroy(hw3d->pipeline);
365
366 if (hw3d->kernel.bo)
367 intel_bo_unreference(hw3d->kernel.bo);
368
369 FREE(hw3d);
370 }
371
372 static bool
373 draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
374 int *prim_generated, int *prim_emitted)
375 {
376 bool need_flush = false;
377 int max_len;
378
379 ilo_3d_own_render_ring(hw3d);
380
381 if (!hw3d->new_batch) {
382 /*
383 * Without a better tracking mechanism, when the framebuffer changes, we
384 * have to assume that the old framebuffer may be sampled from. If that
385 * happens in the middle of a batch buffer, we need to insert manual
386 * flushes.
387 */
388 need_flush = (ilo->dirty & ILO_DIRTY_FB);
389
390 /* the same applies to SO target changes */
391 need_flush |= (ilo->dirty & ILO_DIRTY_SO);
392 }
393
394 /* make sure there is enough room first */
395 max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
396 ILO_3D_PIPELINE_DRAW, ilo);
397 if (need_flush) {
398 max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
399 ILO_3D_PIPELINE_FLUSH, NULL);
400 }
401
402 if (max_len > ilo_cp_space(hw3d->cp)) {
403 ilo_cp_flush(hw3d->cp, "out of space");
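/* flushing starts a new batch, so the manual flush is no longer needed */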
404 need_flush = false;
405 assert(max_len <= ilo_cp_space(hw3d->cp));
406 }
407
408 if (need_flush)
409 ilo_3d_pipeline_emit_flush(hw3d->pipeline);
410
411 return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
412 prim_generated, prim_emitted);
413 }
414
415 static void
416 update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
417 {
418 struct ilo_query *q;
419
420 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
421 q->data.u64 += generated;
422
423 LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
424 q->data.u64 += emitted;
425 }
426
427 bool
428 ilo_3d_pass_render_condition(struct ilo_context *ilo)
429 {
430 struct ilo_3d *hw3d = ilo->hw3d;
431 uint64_t result;
432 bool wait;
433
434 if (!hw3d->render_condition.query)
435 return true;
436
437 switch (hw3d->render_condition.mode) {
438 case PIPE_RENDER_COND_WAIT:
439 case PIPE_RENDER_COND_BY_REGION_WAIT:
440 wait = true;
441 break;
442 case PIPE_RENDER_COND_NO_WAIT:
443 case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
444 default:
445 wait = false;
446 break;
447 }
448
449 if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
450 wait, (union pipe_query_result *) &result))
451 return (!result == hw3d->render_condition.cond);
452 else
453 return true;
454 }
455
456 #define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
457 #define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))
458
459 /**
460 * \see find_sub_primitives() from core mesa
461 */
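/* e.g. indices {0, 1, 2, RESTART, 3, 4, 5} produce two sub-draws: {0, 1, 2} and {3, 4, 5} */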
462 static int
463 ilo_find_sub_primitives(const void *elements, unsigned element_size,
464 const struct pipe_draw_info *orig_info,
465 struct pipe_draw_info *info)
466 {
467 const unsigned max_prims = orig_info->count - orig_info->start;
468 unsigned i, cur_start, cur_count;
469 int scan_index;
470 unsigned scan_num;
471
472 cur_start = orig_info->start;
473 cur_count = 0;
474 scan_num = 0;
475
476 #define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])
477
478 #define SCAN_ELEMENTS(TYPE) \
479 info[scan_num] = *orig_info; \
480 info[scan_num].primitive_restart = false; \
481 for (i = orig_info->start; i < orig_info->count; i++) { \
482 scan_index = IB_INDEX_READ(TYPE, i); \
483 if (scan_index == orig_info->restart_index) { \
484 if (cur_count > 0) { \
485 assert(scan_num < max_prims); \
486 info[scan_num].start = cur_start; \
487 info[scan_num].count = cur_count; \
488 scan_num++; \
489 info[scan_num] = *orig_info; \
490 info[scan_num].primitive_restart = false; \
491 } \
492 cur_start = i + 1; \
493 cur_count = 0; \
494 } \
495 else { \
496 UPDATE_MIN2(info[scan_num].min_index, scan_index); \
497 UPDATE_MAX2(info[scan_num].max_index, scan_index); \
498 cur_count++; \
499 } \
500 } \
501 if (cur_count > 0) { \
502 assert(scan_num < max_prims); \
503 info[scan_num].start = cur_start; \
504 info[scan_num].count = cur_count; \
505 scan_num++; \
506 }
507
508 switch (element_size) {
509 case 1:
510 SCAN_ELEMENTS(uint8_t);
511 break;
512 case 2:
513 SCAN_ELEMENTS(uint16_t);
514 break;
515 case 4:
516 SCAN_ELEMENTS(uint32_t);
517 break;
518 default:
519 assert(0 && "bad index_size in find_sub_primitives()");
520 }
521
522 #undef SCAN_ELEMENTS
523
524 return scan_num;
525 }
526
527 static inline bool
528 ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
529 {
530 /*
531 * Haswell (GEN(7.5)) supports an arbitrary cut index; older GENs only
532 * accept an all-ones cut index for the given index size, so check that.
533 */
534 if (ilo->dev->gen >= ILO_GEN(7.5))
535 return true;
536
537 /* Note: indices must be unsigned byte, unsigned short or unsigned int */
538 switch (ilo->ib.index_size) {
539 case 1:
540 return ((restart_index & 0xff) == 0xff);
541 break;
542 case 2:
543 return ((restart_index & 0xffff) == 0xffff);
544 break;
545 case 4:
546 return (restart_index == 0xffffffff);
547 break;
548 }
549 return false;
550 }
551
552 static inline bool
553 ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
554 {
555 switch (prim) {
556 case PIPE_PRIM_POINTS:
557 case PIPE_PRIM_LINES:
558 case PIPE_PRIM_LINE_STRIP:
559 case PIPE_PRIM_TRIANGLES:
560 case PIPE_PRIM_TRIANGLE_STRIP:
561 /* All 965 GEN graphics support a cut index for these primitive types */
562 return true;
563 break;
564
565 case PIPE_PRIM_LINE_LOOP:
566 case PIPE_PRIM_POLYGON:
567 case PIPE_PRIM_QUAD_STRIP:
568 case PIPE_PRIM_QUADS:
569 case PIPE_PRIM_TRIANGLE_FAN:
570 if (ilo->dev->gen >= ILO_GEN(7.5)) {
571 /* Haswell and newer parts can handle these prim types. */
572 return true;
573 }
574 break;
575 }
576
577 return false;
578 }
579
580 /*
581 * Handle an indexed draw that uses primitive restart in software.  The
582 * caller has already determined that the restart index or the primitive
583 * type cannot be handled by the HW, so the index buffer is scanned here,
584 * the draw is split into sub-primitives at each restart index, and one
585 * draw_vbo() call is issued per sub-primitive.
586 */
587 static void
588 ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
589 const struct pipe_draw_info *info)
590 {
591 struct ilo_context *ilo = ilo_context(pipe);
592 struct pipe_draw_info *restart_info = NULL;
593 int sub_prim_count = 1;
594
595 /*
596 * We have to break the draw up into sub-primitives manually.  Worst case,
597 * every other index is a restart index, so reserve (count + 1) / 2 entries
598 * (e.g. 7 indices with restarts at every other position give 4 sub-draws).
599 */
600 restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
601 if (NULL == restart_info) {
602 /* If we can't get memory for this, bail out */
603 ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
604 return;
605 }
606
607 if (ilo->ib.buffer) {
608 struct pipe_transfer *transfer;
609 const void *map;
610
611 map = pipe_buffer_map(pipe, ilo->ib.buffer,
612 PIPE_TRANSFER_READ, &transfer);
613
614 sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
615 ilo->ib.index_size, info, restart_info);
616
617 pipe_buffer_unmap(pipe, transfer);
618 }
619 else {
620 sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
621 ilo->ib.index_size, info, restart_info);
622 }
623
624 info = restart_info;
625
626 while (sub_prim_count > 0) {
627 pipe->draw_vbo(pipe, info);
628
629 sub_prim_count--;
630 info++;
631 }
632
633 FREE(restart_info);
634 }
635
636 static bool
637 upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
638 {
639 bool incremental = true;
640 int upload;
641
642 upload = ilo_shader_cache_upload(shc,
643 NULL, hw3d->kernel.used, incremental);
644 if (!upload)
645 return true;
646
647 /*
648 * Allocate a new bo. When this is a new batch, assume the bo is still in
649 * use by the previous batch and force allocation.
650 *
651 * Would it help to upload the shader cache with an unsynchronized mapping,
652 * and to drop the new-batch check here?
653 */
654 if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
655 unsigned new_size = (hw3d->kernel.size) ?
656 hw3d->kernel.size : (8 * 1024);
657
658 while (hw3d->kernel.used + upload > new_size)
659 new_size *= 2;
660
661 if (hw3d->kernel.bo)
662 intel_bo_unreference(hw3d->kernel.bo);
663
664 hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
665 "kernel bo", new_size, INTEL_DOMAIN_CPU);
666 if (!hw3d->kernel.bo) {
667 ilo_err("failed to allocate kernel bo\n");
668 return false;
669 }
670
671 hw3d->kernel.used = 0;
672 hw3d->kernel.size = new_size;
673 incremental = false;
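/* the bo is brand new, so the upload below rewrites the entire cache */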
674
675 assert(new_size >= ilo_shader_cache_upload(shc,
676 NULL, hw3d->kernel.used, incremental));
677
678 ilo_3d_pipeline_invalidate(hw3d->pipeline,
679 ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
680 }
681
682 upload = ilo_shader_cache_upload(shc,
683 hw3d->kernel.bo, hw3d->kernel.used, incremental);
684 if (upload < 0) {
685 ilo_err("failed to upload shaders\n");
686 return false;
687 }
688
689 hw3d->kernel.used += upload;
690
691 assert(hw3d->kernel.used <= hw3d->kernel.size);
692
693 return true;
694 }
695
696 static void
697 ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
698 {
699 struct ilo_context *ilo = ilo_context(pipe);
700 struct ilo_3d *hw3d = ilo->hw3d;
701 int prim_generated, prim_emitted;
702
703 if (ilo_debug & ILO_DEBUG_DRAW) {
704 if (info->indexed) {
705 ilo_printf("indexed draw %s: "
706 "index start %d, count %d, vertex range [%d, %d]\n",
707 u_prim_name(info->mode), info->start, info->count,
708 info->min_index, info->max_index);
709 }
710 else {
711 ilo_printf("draw %s: vertex start %d, count %d\n",
712 u_prim_name(info->mode), info->start, info->count);
713 }
714
715 ilo_dump_dirty_flags(ilo->dirty);
716 }
717
718 if (!ilo_3d_pass_render_condition(ilo))
719 return;
720
721 if (info->primitive_restart && info->indexed) {
722 /*
723 * The draw uses primitive restart on an indexed primitive.  Check that the
724 * HW can handle the request and fall back to SW if not.
725 */
726 if (!ilo_check_restart_index(ilo, info->restart_index) ||
727 !ilo_check_restart_prim_type(ilo, info->mode)) {
728 ilo_draw_vbo_with_sw_restart(pipe, info);
729 return;
730 }
731 }
732
733 ilo_finalize_3d_states(ilo, info);
734
735 if (!upload_shaders(hw3d, ilo->shader_cache))
736 return;
737
738 ilo_blit_resolve_framebuffer(ilo);
739
740 /* If draw_vbo ever fails, return immediately. */
741 if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
742 return;
743
744 /* clear dirty status */
745 ilo->dirty = 0x0;
746 hw3d->new_batch = false;
747
748 /* avoid dangling pointer reference */
749 ilo->draw = NULL;
750
751 update_prim_count(hw3d, prim_generated, prim_emitted);
752
753 if (ilo_debug & ILO_DEBUG_NOCACHE)
754 ilo_3d_pipeline_emit_flush(hw3d->pipeline);
755 }
756
757 static void
758 ilo_render_condition(struct pipe_context *pipe,
759 struct pipe_query *query,
760 boolean condition,
761 uint mode)
762 {
763 struct ilo_context *ilo = ilo_context(pipe);
764 struct ilo_3d *hw3d = ilo->hw3d;
765
766 /* reference count? */
767 hw3d->render_condition.query = query;
768 hw3d->render_condition.mode = mode;
769 hw3d->render_condition.cond = condition;
770 }
771
772 static void
773 ilo_texture_barrier(struct pipe_context *pipe)
774 {
775 struct ilo_context *ilo = ilo_context(pipe);
776 struct ilo_3d *hw3d = ilo->hw3d;
777
778 if (ilo->cp->ring != INTEL_RING_RENDER)
779 return;
780
781 ilo_3d_pipeline_emit_flush(hw3d->pipeline);
782
783 /* don't know why */
784 if (ilo->dev->gen >= ILO_GEN(7))
785 ilo_cp_flush(hw3d->cp, "texture barrier");
786 }
787
788 static void
789 ilo_get_sample_position(struct pipe_context *pipe,
790 unsigned sample_count,
791 unsigned sample_index,
792 float *out_value)
793 {
794 struct ilo_context *ilo = ilo_context(pipe);
795 struct ilo_3d *hw3d = ilo->hw3d;
796
797 ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
798 sample_count, sample_index,
799 &out_value[0], &out_value[1]);
800 }
801
802 /**
803 * Initialize 3D-related functions.
804 */
805 void
806 ilo_init_3d_functions(struct ilo_context *ilo)
807 {
808 ilo->base.draw_vbo = ilo_draw_vbo;
809 ilo->base.render_condition = ilo_render_condition;
810 ilo->base.texture_barrier = ilo_texture_barrier;
811 ilo->base.get_sample_position = ilo_get_sample_position;
812 }