2 * Copyright © 2017 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
25 #include "pipe/p_defines.h"
26 #include "pipe/p_state.h"
27 #include "util/ralloc.h"
28 #include "util/u_inlines.h"
29 #include "util/u_format.h"
30 #include "util/u_upload_mgr.h"
31 #include "drm-uapi/i915_drm.h"
32 #include "iris_context.h"
33 #include "iris_resource.h"
34 #include "iris_screen.h"
35 #include "common/gen_defines.h"
36 #include "common/gen_sample_positions.h"
39 * For debugging purposes, this returns a time in seconds.
46 clock_gettime(CLOCK_MONOTONIC
, &tp
);
48 return tp
.tv_sec
+ tp
.tv_nsec
/ 1000000000.0;
52 * The pipe->set_debug_callback() driver hook.
55 iris_set_debug_callback(struct pipe_context
*ctx
,
56 const struct pipe_debug_callback
*cb
)
58 struct iris_context
*ice
= (struct iris_context
*)ctx
;
63 memset(&ice
->dbg
, 0, sizeof(ice
->dbg
));
67 * Called from the batch module when it detects a GPU hang.
69 * In this case, we've lost our GEM context, and can't rely on any existing
70 * state on the GPU. We must mark everything dirty and wipe away any saved
71 * assumptions about the last known state of the GPU.
74 iris_lost_context_state(struct iris_batch
*batch
)
76 /* The batch module doesn't have an iris_context, because we want to
77 * avoid introducing lots of layering violations. Unfortunately, here
78 * we do need to inform the context of batch catastrophe. We know the
79 * batch is one of our context's, so hackily claw our way back.
81 struct iris_context
*ice
= NULL
;
82 struct iris_screen
*screen
;
84 if (batch
->name
== IRIS_BATCH_RENDER
) {
85 ice
= container_of(batch
, ice
, batches
[IRIS_BATCH_RENDER
]);
86 assert(&ice
->batches
[IRIS_BATCH_RENDER
] == batch
);
87 screen
= (void *) ice
->ctx
.screen
;
89 ice
->vtbl
.init_render_context(screen
, batch
, &ice
->vtbl
, &ice
->dbg
);
90 } else if (batch
->name
== IRIS_BATCH_COMPUTE
) {
91 ice
= container_of(batch
, ice
, batches
[IRIS_BATCH_COMPUTE
]);
92 assert(&ice
->batches
[IRIS_BATCH_COMPUTE
] == batch
);
93 screen
= (void *) ice
->ctx
.screen
;
95 ice
->vtbl
.init_compute_context(screen
, batch
, &ice
->vtbl
, &ice
->dbg
);
97 unreachable("unhandled batch reset");
100 ice
->state
.dirty
= ~0ull;
101 ice
->state
.current_hash_scale
= 0;
102 memset(ice
->state
.last_grid
, 0, sizeof(ice
->state
.last_grid
));
103 batch
->last_surface_base_address
= ~0ull;
104 ice
->vtbl
.lost_genx_state(ice
, batch
);
107 static enum pipe_reset_status
108 iris_get_device_reset_status(struct pipe_context
*ctx
)
110 struct iris_context
*ice
= (struct iris_context
*)ctx
;
112 enum pipe_reset_status worst_reset
= PIPE_NO_RESET
;
114 /* Check the reset status of each batch's hardware context, and take the
115 * worst status (if one was guilty, proclaim guilt).
117 for (int i
= 0; i
< IRIS_BATCH_COUNT
; i
++) {
118 /* This will also recreate the hardware contexts as necessary, so any
119 * future queries will show no resets. We only want to report once.
121 enum pipe_reset_status batch_reset
=
122 iris_batch_check_for_reset(&ice
->batches
[i
]);
124 if (batch_reset
== PIPE_NO_RESET
)
127 if (worst_reset
== PIPE_NO_RESET
) {
128 worst_reset
= batch_reset
;
130 /* GUILTY < INNOCENT < UNKNOWN */
131 worst_reset
= MIN2(worst_reset
, batch_reset
);
135 if (worst_reset
!= PIPE_NO_RESET
&& ice
->reset
.reset
)
136 ice
->reset
.reset(ice
->reset
.data
, worst_reset
);
142 iris_set_device_reset_callback(struct pipe_context
*ctx
,
143 const struct pipe_device_reset_callback
*cb
)
145 struct iris_context
*ice
= (struct iris_context
*)ctx
;
150 memset(&ice
->reset
, 0, sizeof(ice
->reset
));
154 iris_get_sample_position(struct pipe_context
*ctx
,
155 unsigned sample_count
,
156 unsigned sample_index
,
165 float _0XOffset
, _1XOffset
, _2XOffset
, _3XOffset
,
166 _4XOffset
, _5XOffset
, _6XOffset
, _7XOffset
,
167 _8XOffset
, _9XOffset
, _10XOffset
, _11XOffset
,
168 _12XOffset
, _13XOffset
, _14XOffset
, _15XOffset
;
169 float _0YOffset
, _1YOffset
, _2YOffset
, _3YOffset
,
170 _4YOffset
, _5YOffset
, _6YOffset
, _7YOffset
,
171 _8YOffset
, _9YOffset
, _10YOffset
, _11YOffset
,
172 _12YOffset
, _13YOffset
, _14YOffset
, _15YOffset
;
175 switch (sample_count
) {
176 case 1: GEN_SAMPLE_POS_1X(u
.v
._
); break;
177 case 2: GEN_SAMPLE_POS_2X(u
.v
._
); break;
178 case 4: GEN_SAMPLE_POS_4X(u
.v
._
); break;
179 case 8: GEN_SAMPLE_POS_8X(u
.v
._
); break;
180 case 16: GEN_SAMPLE_POS_16X(u
.v
._
); break;
181 default: unreachable("invalid sample count");
184 out_value
[0] = u
.a
.x
[sample_index
];
185 out_value
[1] = u
.a
.y
[sample_index
];
189 * Destroy a context, freeing any associated memory.
192 iris_destroy_context(struct pipe_context
*ctx
)
194 struct iris_context
*ice
= (struct iris_context
*)ctx
;
196 if (ctx
->stream_uploader
)
197 u_upload_destroy(ctx
->stream_uploader
);
199 ice
->vtbl
.destroy_state(ice
);
200 iris_destroy_program_cache(ice
);
201 iris_destroy_border_color_pool(ice
);
202 u_upload_destroy(ice
->state
.surface_uploader
);
203 u_upload_destroy(ice
->state
.dynamic_uploader
);
204 u_upload_destroy(ice
->query_buffer_uploader
);
206 slab_destroy_child(&ice
->transfer_pool
);
208 iris_batch_free(&ice
->batches
[IRIS_BATCH_RENDER
]);
209 iris_batch_free(&ice
->batches
[IRIS_BATCH_COMPUTE
]);
210 iris_destroy_binder(&ice
->state
.binder
);
/**
 * Dispatch to the gen-specific implementation of \p func, based on the
 * hardware generation.  e.g. genX_call(devinfo, init_state, ice) calls
 * gen9_init_state(ice) on a Gen9 (Skylake) GPU.
 */
#define genX_call(devinfo, func, ...)             \
   switch (devinfo->gen) {                        \
   case 11:                                       \
      gen11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 10:                                       \
      gen10_##func(__VA_ARGS__);                  \
      break;                                      \
   case 9:                                        \
      gen9_##func(__VA_ARGS__);                   \
      break;                                      \
   case 8:                                        \
      gen8_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }
236 * This is where each context begins.
238 struct pipe_context
*
239 iris_create_context(struct pipe_screen
*pscreen
, void *priv
, unsigned flags
)
241 struct iris_screen
*screen
= (struct iris_screen
*)pscreen
;
242 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
243 struct iris_context
*ice
= rzalloc(NULL
, struct iris_context
);
248 struct pipe_context
*ctx
= &ice
->ctx
;
250 ctx
->screen
= pscreen
;
253 ctx
->stream_uploader
= u_upload_create_default(ctx
);
254 if (!ctx
->stream_uploader
) {
258 ctx
->const_uploader
= ctx
->stream_uploader
;
260 ctx
->destroy
= iris_destroy_context
;
261 ctx
->set_debug_callback
= iris_set_debug_callback
;
262 ctx
->set_device_reset_callback
= iris_set_device_reset_callback
;
263 ctx
->get_device_reset_status
= iris_get_device_reset_status
;
264 ctx
->get_sample_position
= iris_get_sample_position
;
266 ice
->shaders
.urb_size
= devinfo
->urb
.size
;
268 iris_init_context_fence_functions(ctx
);
269 iris_init_blit_functions(ctx
);
270 iris_init_clear_functions(ctx
);
271 iris_init_program_functions(ctx
);
272 iris_init_resource_functions(ctx
);
273 iris_init_flush_functions(ctx
);
275 iris_init_program_cache(ice
);
276 iris_init_border_color_pool(ice
);
277 iris_init_binder(ice
);
279 slab_create_child(&ice
->transfer_pool
, &screen
->transfer_pool
);
281 ice
->state
.surface_uploader
=
282 u_upload_create(ctx
, 16384, PIPE_BIND_CUSTOM
, PIPE_USAGE_IMMUTABLE
,
283 IRIS_RESOURCE_FLAG_SURFACE_MEMZONE
);
284 ice
->state
.dynamic_uploader
=
285 u_upload_create(ctx
, 16384, PIPE_BIND_CUSTOM
, PIPE_USAGE_IMMUTABLE
,
286 IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE
);
288 ice
->query_buffer_uploader
=
289 u_upload_create(ctx
, 4096, PIPE_BIND_CUSTOM
, PIPE_USAGE_STAGING
,
292 genX_call(devinfo
, init_state
, ice
);
293 genX_call(devinfo
, init_blorp
, ice
);
294 genX_call(devinfo
, init_query
, ice
);
297 if (flags
& PIPE_CONTEXT_HIGH_PRIORITY
)
298 priority
= GEN_CONTEXT_HIGH_PRIORITY
;
299 if (flags
& PIPE_CONTEXT_LOW_PRIORITY
)
300 priority
= GEN_CONTEXT_LOW_PRIORITY
;
302 if (unlikely(INTEL_DEBUG
& DEBUG_BATCH
))
303 ice
->state
.sizes
= _mesa_hash_table_u64_create(ice
);
305 for (int i
= 0; i
< IRIS_BATCH_COUNT
; i
++) {
306 iris_init_batch(&ice
->batches
[i
], screen
, &ice
->vtbl
, &ice
->dbg
,
307 &ice
->reset
, ice
->state
.sizes
,
308 ice
->batches
, (enum iris_batch_name
) i
,
309 I915_EXEC_RENDER
, priority
);
312 ice
->vtbl
.init_render_context(screen
, &ice
->batches
[IRIS_BATCH_RENDER
],
313 &ice
->vtbl
, &ice
->dbg
);
314 ice
->vtbl
.init_compute_context(screen
, &ice
->batches
[IRIS_BATCH_COMPUTE
],
315 &ice
->vtbl
, &ice
->dbg
);