i965: Print size of validation and relocation lists in INTEL_DEBUG=flush
[mesa.git] / src / mesa / drivers / dri / i965 / intel_batchbuffer.c
/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_bufmgr.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "common/gen_decoder.h"

#include "util/hash_table.h"

#include <xf86drm.h>
#include <i915_drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Target sizes of the batch and state buffers. We create the initial
 * buffers at these sizes, and flush when they're nearly full. If we
 * underestimate how close we are to the end, and suddenly need more space
 * in the middle of a draw, we can grow the buffers, and finish the draw.
 * At that point, we'll be over our target size, so the next operation
 * should flush. Each time we flush the batch, we recreate both buffers
 * at the original target size, so they don't grow without bound.
 */
#define BATCH_SZ (20 * 1024)
#define STATE_SZ (16 * 1024)

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
 * Address, which means that we can't put binding tables beyond 64kB. This
 * effectively limits the maximum statebuffer size to 64kB.
 */
#define MAX_STATE_SIZE (64 * 1024)
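
/* A worked example of the growth policy below (illustrative only): buffers
 * grow in 1.5x steps, clamped to the limits above. Starting from the 16kB
 * statebuffer target, successive grows yield 16kB -> 24kB -> 36kB -> 54kB,
 * and the next step is clamped to MAX_STATE_SIZE (64kB).
 */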

static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
                        struct intel_screen *screen);

static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

static void
init_reloc_list(struct brw_reloc_list *rlist, int count)
{
   rlist->reloc_count = 0;
   rlist->reloc_array_size = count;
   rlist->relocs = malloc(rlist->reloc_array_size *
                          sizeof(struct drm_i915_gem_relocation_entry));
}

void
intel_batchbuffer_init(struct intel_screen *screen,
                       struct intel_batchbuffer *batch)
{
   const struct gen_device_info *devinfo = &screen->devinfo;

   if (!devinfo->has_llc) {
      batch->batch_cpu_map = malloc(BATCH_SZ);
      batch->map = batch->batch_cpu_map;
      batch->map_next = batch->map;
      batch->state_cpu_map = malloc(STATE_SZ);
      batch->state_map = batch->state_cpu_map;
   }

   init_reloc_list(&batch->batch_relocs, 250);
   init_reloc_list(&batch->state_relocs, 250);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

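   /* With batch debugging enabled, track the size of each brw_state_batch()
    * allocation, keyed by its statebuffer offset, so the decoder in
    * do_batch_dump() knows how many structs a state pointer refers to.
    */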
   if (INTEL_DEBUG & DEBUG_BATCH) {
      batch->state_batch_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
   }

   batch->use_batch_first =
      screen->kernel_features & KERNEL_ALLOWS_EXEC_BATCH_FIRST;

   /* PIPE_CONTROL needs a w/a but only on gen6 */
   batch->valid_reloc_flags = EXEC_OBJECT_WRITE;
   if (devinfo->gen == 6)
      batch->valid_reloc_flags |= EXEC_OBJECT_NEEDS_GTT;

   intel_batchbuffer_reset(batch, screen);
}

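/* Read a field exactly once, even if another thread is racing to update it.
 * add_exec_bo() and brw_batch_references() take a snapshot of bo->index this
 * way, then validate it against exec_bos[] before trusting it.
 */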
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned
add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   brw_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .alignment = bo->align,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}

static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
                        struct intel_screen *screen)
{
   struct brw_bufmgr *bufmgr = screen->bufmgr;
   const struct gen_device_info *devinfo = &screen->devinfo;

   if (batch->last_bo != NULL) {
      brw_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;

   batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
   if (!batch->batch_cpu_map) {
      batch->map = brw_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   }
   batch->map_next = batch->map;

   batch->state_bo = brw_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
   batch->state_bo->kflags =
      can_do_exec_capture(screen) ? EXEC_OBJECT_CAPTURE : 0;
   if (!batch->state_cpu_map) {
      batch->state_map =
         brw_bo_map(NULL, batch->state_bo, MAP_READ | MAP_WRITE);
   }

   /* Avoid making 0 a valid state offset - otherwise the decoder will try
    * to decode data when we use offset 0 as a null pointer.
    */
   batch->state_used = 1;

   add_exec_bo(batch, batch->bo);
   assert(batch->bo->index == 0);

   batch->needs_sol_reset = false;
   batch->state_base_address_emitted = false;

   /* We don't know what ring the new batch will be sent to until we see the
    * first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
    */
   batch->ring = UNKNOWN_RING;

   if (batch->state_batch_sizes)
      _mesa_hash_table_clear(batch->state_batch_sizes, NULL);
}

static void
intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
{
   intel_batchbuffer_reset(&brw->batch, brw->screen);
   brw_render_cache_set_clear(brw);
}

void
intel_batchbuffer_save_state(struct brw_context *brw)
{
   brw->batch.saved.map_next = brw->batch.map_next;
   brw->batch.saved.batch_reloc_count = brw->batch.batch_relocs.reloc_count;
   brw->batch.saved.state_reloc_count = brw->batch.state_relocs.reloc_count;
   brw->batch.saved.exec_count = brw->batch.exec_count;
}

void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
   for (int i = brw->batch.saved.exec_count;
        i < brw->batch.exec_count; i++) {
      brw_bo_unreference(brw->batch.exec_bos[i]);
   }
   brw->batch.batch_relocs.reloc_count = brw->batch.saved.batch_reloc_count;
   brw->batch.state_relocs.reloc_count = brw->batch.saved.state_reloc_count;
   brw->batch.exec_count = brw->batch.saved.exec_count;

   brw->batch.map_next = brw->batch.saved.map_next;
   if (USED_BATCH(brw->batch) == 0)
      brw->batch.ring = UNKNOWN_RING;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   free(batch->batch_cpu_map);
   free(batch->state_cpu_map);

   for (int i = 0; i < batch->exec_count; i++) {
      brw_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->batch_relocs.relocs);
   free(batch->state_relocs.relocs);
   free(batch->exec_bos);
   free(batch->validation_list);

   brw_bo_unreference(batch->last_bo);
   brw_bo_unreference(batch->bo);
   brw_bo_unreference(batch->state_bo);
   if (batch->state_batch_sizes)
      _mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}

static void
replace_bo_in_reloc_list(struct brw_reloc_list *rlist,
                         uint32_t old_handle, uint32_t new_handle)
{
   for (int i = 0; i < rlist->reloc_count; i++) {
      if (rlist->relocs[i].target_handle == old_handle)
         rlist->relocs[i].target_handle = new_handle;
   }
}

/**
 * Grow either the batch or state buffer to a new larger size.
 *
 * We can't actually grow buffers, so we allocate a new one, copy over
 * the existing contents, and update our lists to refer to the new one.
 *
 * Note that this is only temporary - each new batch recreates the buffers
 * at their original target size (BATCH_SZ or STATE_SZ).
 */
static void
grow_buffer(struct brw_context *brw,
            struct brw_bo **bo_ptr,
            uint32_t **map_ptr,
            uint32_t **cpu_map_ptr,
            unsigned existing_bytes,
            unsigned new_size)
{
   struct intel_batchbuffer *batch = &brw->batch;
   struct brw_bufmgr *bufmgr = brw->bufmgr;

   uint32_t *old_map = *map_ptr;
   struct brw_bo *old_bo = *bo_ptr;

   struct brw_bo *new_bo = brw_bo_alloc(bufmgr, old_bo->name, new_size, 4096);
   uint32_t *new_map;

   perf_debug("Growing %s - ran out of space\n", old_bo->name);

   /* Copy existing data to the new larger buffer */
   if (*cpu_map_ptr) {
      *cpu_map_ptr = new_map = realloc(*cpu_map_ptr, new_size);
   } else {
      new_map = brw_bo_map(brw, new_bo, MAP_READ | MAP_WRITE);
      memcpy(new_map, old_map, existing_bytes);
   }

   /* Try to put the new BO at the same GTT offset as the old BO (which
    * we're throwing away, so it doesn't need to be there).
    *
    * This guarantees that our relocations continue to work: values we've
    * already written into the buffer, values we're going to write into the
    * buffer, and the validation/relocation lists all will match.
    */
   new_bo->gtt_offset = old_bo->gtt_offset;
   new_bo->index = old_bo->index;

   /* Batch/state buffers are per-context, and if we've run out of space,
    * we must have actually used them before, so...they will be in the list.
    */
   assert(old_bo->index < batch->exec_count);
   assert(batch->exec_bos[old_bo->index] == old_bo);

   /* Update the validation list to use the new BO. */
   batch->exec_bos[old_bo->index] = new_bo;
   batch->validation_list[old_bo->index].handle = new_bo->gem_handle;
   brw_bo_reference(new_bo);
   brw_bo_unreference(old_bo);

   if (!batch->use_batch_first) {
      /* We're not using I915_EXEC_HANDLE_LUT, which means we need to go
       * update the relocation list entries to point at the new BO as well.
       * (With newer kernels, the "handle" is an offset into the validation
       * list, which remains unchanged, so we can skip this.)
       */
      replace_bo_in_reloc_list(&batch->batch_relocs,
                               old_bo->gem_handle, new_bo->gem_handle);
      replace_bo_in_reloc_list(&batch->state_relocs,
                               old_bo->gem_handle, new_bo->gem_handle);
   }

   /* Drop the *bo_ptr reference. This should free the old BO. */
   brw_bo_unreference(old_bo);

   *bo_ptr = new_bo;
   *map_ptr = new_map;
}

void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_batchbuffer *batch = &brw->batch;

   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       devinfo->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

   const unsigned batch_used = USED_BATCH(*batch) * 4;
   if (batch_used + sz >= BATCH_SZ) {
      if (!brw->no_batch_wrap) {
         intel_batchbuffer_flush(brw);
      } else {
         const unsigned new_size =
            MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
         grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
                     batch_used, new_size);
         batch->map_next = (void *) batch->map + batch_used;
         assert(batch_used + sz < batch->bo->size);
      }
   }

   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;
}

#ifdef DEBUG
#define CSI "\e["
#define BLUE_HEADER CSI "0;44m"
#define NORMAL CSI "0m"

static void
decode_struct(struct brw_context *brw, struct gen_spec *spec,
              const char *struct_name, uint32_t *data,
              uint32_t gtt_offset, uint32_t offset, bool color)
{
   struct gen_group *group = gen_spec_find_struct(spec, struct_name);
   if (!group)
      return;

   fprintf(stderr, "%s\n", struct_name);
   gen_print_group(stderr, group, gtt_offset + offset,
                   &data[offset / 4], color);
}

static void
decode_structs(struct brw_context *brw, struct gen_spec *spec,
               const char *struct_name,
               uint32_t *data, uint32_t gtt_offset, uint32_t offset,
               int struct_size, bool color)
{
   struct gen_group *group = gen_spec_find_struct(spec, struct_name);
   if (!group)
      return;

   int entries = brw_state_batch_size(brw, offset) / struct_size;
   for (int i = 0; i < entries; i++) {
      fprintf(stderr, "%s %d\n", struct_name, i);
      gen_print_group(stderr, group, gtt_offset + offset,
                      &data[(offset + i * struct_size) / 4], color);
   }
}

static void
do_batch_dump(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_batchbuffer *batch = &brw->batch;
   struct gen_spec *spec = gen_spec_load(&brw->screen->devinfo);

   if (batch->ring != RENDER_RING)
      return;

   uint32_t *batch_data = brw_bo_map(brw, batch->bo, MAP_READ);
   uint32_t *state = brw_bo_map(brw, batch->state_bo, MAP_READ);
   if (batch_data == NULL || state == NULL) {
      fprintf(stderr, "WARNING: failed to map batchbuffer/statebuffer\n");
      return;
   }

   uint32_t *end = batch_data + USED_BATCH(*batch);
   uint32_t batch_gtt_offset = batch->bo->gtt_offset;
   uint32_t state_gtt_offset = batch->state_bo->gtt_offset;
   int length;

   bool color = INTEL_DEBUG & DEBUG_COLOR;
   const char *header_color = color ? BLUE_HEADER : "";
   const char *reset_color = color ? NORMAL : "";

   for (uint32_t *p = batch_data; p < end; p += length) {
      struct gen_group *inst = gen_spec_find_instruction(spec, p);
      length = gen_group_get_length(inst, p);
      assert(inst == NULL || length > 0);
      length = MAX2(1, length);
      if (inst == NULL) {
         fprintf(stderr, "unknown instruction %08x\n", p[0]);
         continue;
      }

      uint64_t offset = batch_gtt_offset + 4 * (p - batch_data);

      fprintf(stderr, "%s0x%08"PRIx64": 0x%08x: %-80s%s\n", header_color,
              offset, p[0], gen_group_get_name(inst), reset_color);

      gen_print_group(stderr, inst, offset, p, color);

      switch (gen_group_get_opcode(inst) >> 16) {
      case _3DSTATE_PIPELINED_POINTERS:
         /* Note: these Gen4-5 pointers are full relocations rather than
          * offsets from the start of the statebuffer. So we need to subtract
          * gtt_offset (the start of the statebuffer) to obtain an offset we
          * can add to the map and get at the data.
          */
         decode_struct(brw, spec, "VS_STATE", state, state_gtt_offset,
                       (p[1] & ~0x1fu) - state_gtt_offset, color);
         if (p[2] & 1) {
            decode_struct(brw, spec, "GS_STATE", state, state_gtt_offset,
                          (p[2] & ~0x1fu) - state_gtt_offset, color);
         }
         if (p[3] & 1) {
            decode_struct(brw, spec, "CLIP_STATE", state, state_gtt_offset,
                          (p[3] & ~0x1fu) - state_gtt_offset, color);
         }
         decode_struct(brw, spec, "SF_STATE", state, state_gtt_offset,
                       (p[4] & ~0x1fu) - state_gtt_offset, color);
         decode_struct(brw, spec, "WM_STATE", state, state_gtt_offset,
                       (p[5] & ~0x1fu) - state_gtt_offset, color);
         decode_struct(brw, spec, "COLOR_CALC_STATE", state, state_gtt_offset,
                       (p[6] & ~0x3fu) - state_gtt_offset, color);
         break;
      case _3DSTATE_BINDING_TABLE_POINTERS_VS:
      case _3DSTATE_BINDING_TABLE_POINTERS_HS:
      case _3DSTATE_BINDING_TABLE_POINTERS_DS:
      case _3DSTATE_BINDING_TABLE_POINTERS_GS:
      case _3DSTATE_BINDING_TABLE_POINTERS_PS: {
         struct gen_group *group =
            gen_spec_find_struct(spec, "RENDER_SURFACE_STATE");
         if (!group)
            break;

         uint32_t bt_offset = p[1] & ~0x1fu;
         int bt_entries = brw_state_batch_size(brw, bt_offset) / 4;
         uint32_t *bt_pointers = &state[bt_offset / 4];
         for (int i = 0; i < bt_entries; i++) {
            fprintf(stderr, "SURFACE_STATE - BTI = %d\n", i);
            gen_print_group(stderr, group, state_gtt_offset + bt_pointers[i],
                            &state[bt_pointers[i] / 4], color);
         }
         break;
      }
      case _3DSTATE_SAMPLER_STATE_POINTERS_VS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_HS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_DS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_GS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_PS:
         decode_structs(brw, spec, "SAMPLER_STATE", state,
                        state_gtt_offset, p[1] & ~0x1fu, 4 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS:
         decode_structs(brw, spec, "CLIP_VIEWPORT", state,
                        state_gtt_offset, p[1] & ~0x3fu, 4 * 4, color);
         decode_structs(brw, spec, "SF_VIEWPORT", state,
                        state_gtt_offset, p[1] & ~0x3fu, 8 * 4, color);
         decode_structs(brw, spec, "CC_VIEWPORT", state,
                        state_gtt_offset, p[3] & ~0x3fu, 2 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS_CC:
         decode_structs(brw, spec, "CC_VIEWPORT", state,
                        state_gtt_offset, p[1] & ~0x3fu, 2 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL:
         decode_structs(brw, spec, "SF_CLIP_VIEWPORT", state,
                        state_gtt_offset, p[1] & ~0x3fu, 16 * 4, color);
         break;
      case _3DSTATE_SCISSOR_STATE_POINTERS:
         decode_structs(brw, spec, "SCISSOR_RECT", state,
                        state_gtt_offset, p[1] & ~0x1fu, 2 * 4, color);
         break;
      case _3DSTATE_BLEND_STATE_POINTERS:
         /* TODO: handle Gen8+ extra dword at the beginning */
         decode_structs(brw, spec, "BLEND_STATE", state,
                        state_gtt_offset, p[1] & ~0x3fu, 8 * 4, color);
         break;
      case _3DSTATE_CC_STATE_POINTERS:
         if (devinfo->gen >= 7) {
            decode_struct(brw, spec, "COLOR_CALC_STATE", state,
                          state_gtt_offset, p[1] & ~0x3fu, color);
         } else if (devinfo->gen == 6) {
            decode_structs(brw, spec, "BLEND_STATE", state,
                           state_gtt_offset, p[1] & ~0x3fu, 2 * 4, color);
            decode_struct(brw, spec, "DEPTH_STENCIL_STATE", state,
                          state_gtt_offset, p[2] & ~0x3fu, color);
            decode_struct(brw, spec, "COLOR_CALC_STATE", state,
                          state_gtt_offset, p[3] & ~0x3fu, color);
         }
         break;
      case _3DSTATE_DEPTH_STENCIL_STATE_POINTERS:
         decode_struct(brw, spec, "DEPTH_STENCIL_STATE", state,
                       state_gtt_offset, p[1] & ~0x3fu, color);
         break;
      }
   }

   brw_bo_unmap(batch->bo);
   brw_bo_unmap(batch->state_bo);
}
#else
static void do_batch_dump(struct brw_context *brw) { }
#endif

/**
 * Called when starting a new batch buffer.
 */
static void
brw_new_batch(struct brw_context *brw)
{
   /* Unreference any BOs held by the previous batch, and reset counts. */
   for (int i = 0; i < brw->batch.exec_count; i++) {
      brw_bo_unreference(brw->batch.exec_bos[i]);
      brw->batch.exec_bos[i] = NULL;
   }
   brw->batch.batch_relocs.reloc_count = 0;
   brw->batch.state_relocs.reloc_count = 0;
   brw->batch.exec_count = 0;
   brw->batch.aperture_space = 0;

   brw_bo_unreference(brw->batch.state_bo);

   /* Create a new batchbuffer and reset the associated state: */
   intel_batchbuffer_reset_and_clear_render_cache(brw);

   /* If the kernel supports hardware contexts, then most hardware state is
    * preserved between batches; we only need to re-emit state that is required
    * to be in every batch. Otherwise we need to re-emit all the state that
    * would otherwise be stored in the context (which for all intents and
    * purposes means everything).
    */
   if (brw->hw_ctx == 0)
      brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;

   brw->ctx.NewDriverState |= BRW_NEW_BATCH;

   brw->ib.index_size = -1;

   /* We need to periodically reap the shader time results, because rollover
    * happens every few seconds. We also want to see results every once in a
    * while, because many programs won't cleanly destroy our context, so the
    * end-of-run printout may not happen.
    */
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_collect_and_report_shader_time(brw);
}

/**
 * Called from intel_batchbuffer_flush before emitting MI_BATCH_BUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches). All of this state MUST fit in the reserved space at the
 * end of the batchbuffer. If you add more GPU state, increase the reserved
 * space by updating the BATCH_RESERVED macro.
 */
static void
brw_finish_batch(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Capture the closing pipeline statistics register values necessary to
    * support query objects (in the non-hardware context world).
    */
   brw_emit_query_end(brw);

   if (brw->batch.ring == RENDER_RING) {
      /* Work around L3 state leaking into contexts created with
       * MI_RESTORE_INHIBIT set, which assume that the L3 cache is
       * configured according to the hardware defaults.
       */
      if (devinfo->gen >= 7)
         gen7_restore_default_l3_config(brw);

      if (devinfo->is_haswell) {
         /* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
          * 3DSTATE_CC_STATE_POINTERS > "Note":
          *
          * "SW must program 3DSTATE_CC_STATE_POINTERS command at the end of every
          * 3D batch buffer followed by a PIPE_CONTROL with RC flush and CS stall."
          *
          * From the example in the docs, it seems to expect a regular pipe control
          * flush here as well. We may have done it already, but meh.
          *
          * See also WaAvoidRCZCounterRollover.
          */
         brw_emit_mi_flush(brw);
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(brw->cc.state_offset | 1);
         ADVANCE_BATCH();
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                          PIPE_CONTROL_CS_STALL);
      }
   }
}

static void
throttle(struct brw_context *brw)
{
   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame. At this point, the round
    * trips for swap/copy and getting new buffers are done and we'll spend
    * less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_swap_throttle && brw->throttle_batch[0]) {
      if (brw->throttle_batch[1]) {
         if (!brw->disable_throttling) {
            /* Pass NULL rather than brw so we avoid perf_debug warnings;
             * stalling is common and expected here...
             */
            brw_bo_wait_rendering(brw->throttle_batch[1]);
         }
         brw_bo_unreference(brw->throttle_batch[1]);
      }
      brw->throttle_batch[1] = brw->throttle_batch[0];
      brw->throttle_batch[0] = NULL;
      brw->need_swap_throttle = false;
      /* Throttling here is more precise than the throttle ioctl, so skip it */
      brw->need_flush_throttle = false;
   }

   if (brw->need_flush_throttle) {
      __DRIscreen *dri_screen = brw->screen->driScrnPriv;
      drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
      brw->need_flush_throttle = false;
   }
}

static int
execbuffer(int fd,
           struct intel_batchbuffer *batch,
           uint32_t ctx_id,
           int used,
           int in_fence,
           int *out_fence,
           int flags)
{
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = used,
      .flags = flags,
      .rsvd1 = ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence != -1) {
      execbuf.rsvd2 = in_fence;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drmIoctl(fd, cmd, &execbuf);
   if (ret != 0)
      ret = -errno;

   for (int i = 0; i < batch->exec_count; i++) {
      struct brw_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      /* Update brw_bo::gtt_offset */
      if (batch->validation_list[i].offset != bo->gtt_offset) {
         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
             bo->gem_handle, bo->gtt_offset,
             batch->validation_list[i].offset);
         bo->gtt_offset = batch->validation_list[i].offset;
      }
   }

   if (ret == 0 && out_fence != NULL)
      *out_fence = execbuf.rsvd2 >> 32;

   return ret;
}

static int
do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_batchbuffer *batch = &brw->batch;
   int ret = 0;

   if (batch->batch_cpu_map) {
      void *bo_map = brw_bo_map(brw, batch->bo, MAP_WRITE);
      memcpy(bo_map, batch->batch_cpu_map, 4 * USED_BATCH(*batch));
   }

   if (batch->state_cpu_map) {
      void *bo_map = brw_bo_map(brw, batch->state_bo, MAP_WRITE);
      memcpy(bo_map, batch->state_cpu_map, batch->state_used);
   }

   brw_bo_unmap(batch->bo);
   brw_bo_unmap(batch->state_bo);

   if (!brw->screen->no_hw) {
      /* The requirements for using I915_EXEC_NO_RELOC are:
       *
       *   The addresses written in the objects must match the corresponding
       *   reloc.gtt_offset which in turn must match the corresponding
       *   execobject.offset.
       *
       *   Any render targets written to in the batch must be flagged with
       *   EXEC_OBJECT_WRITE.
       *
       *   To avoid stalling, execobject.offset should match the current
       *   address of that object within the active context.
       */
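      /* This is also why execbuffer() writes each validation-list offset
       * back into brw_bo::gtt_offset after every submission: keeping the
       * presumed offsets accurate lets the kernel skip the relocation pass
       * entirely on the next submit.
       */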
      int flags = I915_EXEC_NO_RELOC;

      if (devinfo->gen >= 6 && batch->ring == BLT_RING) {
         flags |= I915_EXEC_BLT;
      } else {
         flags |= I915_EXEC_RENDER;
      }
      if (batch->needs_sol_reset)
         flags |= I915_EXEC_GEN7_SOL_RESET;

      uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;

      /* Set statebuffer relocations */
      const unsigned state_index = batch->state_bo->index;
      if (state_index < batch->exec_count &&
          batch->exec_bos[state_index] == batch->state_bo) {
         struct drm_i915_gem_exec_object2 *entry =
            &batch->validation_list[state_index];
         assert(entry->handle == batch->state_bo->gem_handle);
         entry->relocation_count = batch->state_relocs.reloc_count;
         entry->relocs_ptr = (uintptr_t) batch->state_relocs.relocs;
      }

      /* Set batchbuffer relocations */
      struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
      assert(entry->handle == batch->bo->gem_handle);
      entry->relocation_count = batch->batch_relocs.reloc_count;
      entry->relocs_ptr = (uintptr_t) batch->batch_relocs.relocs;

      if (batch->use_batch_first) {
         flags |= I915_EXEC_BATCH_FIRST | I915_EXEC_HANDLE_LUT;
      } else {
         /* Move the batch to the end of the validation list */
         struct drm_i915_gem_exec_object2 tmp;
         struct brw_bo *tmp_bo;
         const unsigned index = batch->exec_count - 1;

         tmp = *entry;
         *entry = batch->validation_list[index];
         batch->validation_list[index] = tmp;

         /* Keep exec_bos in sync with the validation list, or the
          * gtt_offset write-back in execbuffer() would pair offsets
          * with the wrong BOs.
          */
         tmp_bo = batch->exec_bos[0];
         batch->exec_bos[0] = batch->exec_bos[index];
         batch->exec_bos[index] = tmp_bo;
      }

      ret = execbuffer(dri_screen->fd, batch, hw_ctx,
                       4 * USED_BATCH(*batch),
                       in_fence_fd, out_fence_fd, flags);

      throttle(brw);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      do_batch_dump(brw);

   if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      brw_check_for_reset(brw);

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }

   return ret;
}

/**
 * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_intel_batchbuffer_flush_fence(struct brw_context *brw,
                               int in_fence_fd, int *out_fence_fd,
                               const char *file, int line)
{
   int ret;

   if (USED_BATCH(brw->batch) == 0)
      return 0;

   if (brw->throttle_batch[0] == NULL) {
      brw->throttle_batch[0] = brw->batch.bo;
      brw_bo_reference(brw->throttle_batch[0]);
   }

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = 4 * USED_BATCH(brw->batch);
      int bytes_for_state = brw->batch.state_used;
      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
              " %5db (%0.1f%%) (state), %4d BOs (%0.1fMB aperture),"
              " %4d batch relocs, %4d state relocs\n", file, line,
              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
              bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
              brw->batch.exec_count,
              (float) brw->batch.aperture_space / (1024 * 1024),
              brw->batch.batch_relocs.reloc_count,
              brw->batch.state_relocs.reloc_count);
   }

   brw_finish_batch(brw);

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
   if (USED_BATCH(brw->batch) & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
   }

   intel_upload_finish(brw);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);

   ret = do_flush_locked(brw, in_fence_fd, out_fence_fd);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      brw_bo_wait_rendering(brw->batch.bo);
   }

   /* Start a new batch buffer. */
   brw_new_batch(brw);

   return ret;
}

bool
brw_batch_has_aperture_space(struct brw_context *brw, unsigned extra_space)
{
   return brw->batch.aperture_space + extra_space <=
          brw->screen->aperture_threshold;
}

bool
brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}

/* This is the only way buffers get added to the validate list.
 */
static uint64_t
emit_reloc(struct intel_batchbuffer *batch,
           struct brw_reloc_list *rlist, uint32_t offset,
           struct brw_bo *target, uint32_t target_offset,
           unsigned int reloc_flags)
{
   assert(target != NULL);

   if (rlist->reloc_count == rlist->reloc_array_size) {
      rlist->reloc_array_size *= 2;
      rlist->relocs = realloc(rlist->relocs,
                              rlist->reloc_array_size *
                              sizeof(struct drm_i915_gem_relocation_entry));
   }

   unsigned int index = add_exec_bo(batch, target);
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];

   if (reloc_flags)
      entry->flags |= reloc_flags & batch->valid_reloc_flags;

   rlist->relocs[rlist->reloc_count++] =
      (struct drm_i915_gem_relocation_entry) {
         .offset = offset,
         .delta = target_offset,
         .target_handle = batch->use_batch_first ? index : target->gem_handle,
         .presumed_offset = entry->offset,
      };

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   return entry->offset + target_offset;
}
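
/* An illustrative walk-through (not driver code): suppose a surface BO was
 * last placed at GTT address 0x100000 and we emit a pointer to byte 256
 * within it. emit_reloc() records { .offset = <location of the pointer>,
 * .delta = 256, .presumed_offset = 0x100000 } and returns 0x100100 for the
 * caller to write into the batch. If the kernel later moves the BO, the
 * relocation entry tells it which dword to rewrite; if the BO stays put,
 * I915_EXEC_NO_RELOC lets it skip the fixup entirely.
 */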

uint64_t
brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
                struct brw_bo *target, uint32_t target_offset,
                unsigned int reloc_flags)
{
   assert(batch_offset <= batch->bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->batch_relocs, batch_offset,
                     target, target_offset, reloc_flags);
}

uint64_t
brw_state_reloc(struct intel_batchbuffer *batch, uint32_t state_offset,
                struct brw_bo *target, uint32_t target_offset,
                unsigned int reloc_flags)
{
   assert(state_offset <= batch->state_bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->state_relocs, state_offset,
                     target, target_offset, reloc_flags);
}

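/* Look up the size recorded by brw_state_batch() for the allocation at
 * @offset. This is only tracked when batch debugging (INTEL_DEBUG=bat) is
 * enabled, for use by the batch decoder above.
 */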
uint32_t
brw_state_batch_size(struct brw_context *brw, uint32_t offset)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(brw->batch.state_batch_sizes,
                              (void *) (uintptr_t) offset);
   return entry ? (uintptr_t) entry->data : 0;
}

/**
 * Reserve some space in the statebuffer, or flush.
 *
 * This is used to estimate when we're near the end of the batch,
 * so we can flush early.
 */
void
brw_require_statebuffer_space(struct brw_context *brw, int size)
{
   if (brw->batch.state_used + size >= STATE_SZ)
      intel_batchbuffer_flush(brw);
}

/**
 * Allocates a block of space in the statebuffer for indirect state.
 */
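/* A hypothetical usage sketch (names below are illustrative, not from this
 * file):
 *
 *    uint32_t cc_offset;
 *    uint32_t *cc = brw_state_batch(brw, 6 * 4, 64, &cc_offset);
 *    cc[0] = ...;   // fill in the indirect state
 *    // ...then point a *_STATE_POINTERS packet at cc_offset.
 *
 * The returned pointer writes through the statebuffer CPU map, and
 * *out_offset is the offset within the statebuffer to hand to pointer
 * packets.
 */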
void *
brw_state_batch(struct brw_context *brw,
                int size,
                int alignment,
                uint32_t *out_offset)
{
   struct intel_batchbuffer *batch = &brw->batch;

   assert(size < batch->state_bo->size);

   uint32_t offset = ALIGN(batch->state_used, alignment);

   if (offset + size >= STATE_SZ) {
      if (!brw->no_batch_wrap) {
         intel_batchbuffer_flush(brw);
         offset = ALIGN(batch->state_used, alignment);
      } else {
         const unsigned new_size =
            MIN2(batch->state_bo->size + batch->state_bo->size / 2,
                 MAX_STATE_SIZE);
         grow_buffer(brw, &batch->state_bo, &batch->state_map,
                     &batch->state_cpu_map, batch->state_used, new_size);
         assert(offset + size < batch->state_bo->size);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      _mesa_hash_table_insert(batch->state_batch_sizes,
                              (void *) (uintptr_t) offset,
                              (void *) (uintptr_t) size);
   }

   batch->state_used = offset + size;

   *out_offset = offset;
   return batch->state_map + (offset >> 2);
}

void
intel_batchbuffer_data(struct brw_context *brw,
                       const void *data, GLuint bytes, enum brw_gpu_ring ring)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(brw, bytes, ring);
   memcpy(brw->batch.map_next, data, bytes);
   brw->batch.map_next += bytes >> 2;
}

static void
load_sized_register_mem(struct brw_context *brw,
                        uint32_t reg,
                        struct brw_bo *bo,
                        uint32_t offset,
                        int size)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   int i;

   /* MI_LOAD_REGISTER_MEM only exists on Gen7+. */
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(4 * size);
      for (i = 0; i < size; i++) {
         OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (4 - 2));
         OUT_BATCH(reg + i * 4);
         OUT_RELOC64(bo, 0, offset + i * 4);
      }
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(3 * size);
      for (i = 0; i < size; i++) {
         OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (3 - 2));
         OUT_BATCH(reg + i * 4);
         OUT_RELOC(bo, 0, offset + i * 4);
      }
      ADVANCE_BATCH();
   }
}

void
brw_load_register_mem(struct brw_context *brw,
                      uint32_t reg,
                      struct brw_bo *bo,
                      uint32_t offset)
{
   load_sized_register_mem(brw, reg, bo, offset, 1);
}

void
brw_load_register_mem64(struct brw_context *brw,
                        uint32_t reg,
                        struct brw_bo *bo,
                        uint32_t offset)
{
   load_sized_register_mem(brw, reg, bo, offset, 2);
}

/*
 * Write an arbitrary 32-bit register to a buffer via MI_STORE_REGISTER_MEM.
 */
void
brw_store_register_mem32(struct brw_context *brw,
                         struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(4);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg);
      OUT_RELOC64(bo, RELOC_WRITE, offset);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg);
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset);
      ADVANCE_BATCH();
   }
}

/*
 * Write an arbitrary 64-bit register to a buffer via MI_STORE_REGISTER_MEM.
 */
void
brw_store_register_mem64(struct brw_context *brw,
                         struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   /* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
    * read a full 64-bit register, we need to do two of them.
    */
   if (devinfo->gen >= 8) {
      BEGIN_BATCH(8);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg);
      OUT_RELOC64(bo, RELOC_WRITE, offset);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg + sizeof(uint32_t));
      OUT_RELOC64(bo, RELOC_WRITE, offset + sizeof(uint32_t));
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg);
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg + sizeof(uint32_t));
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset + sizeof(uint32_t));
      ADVANCE_BATCH();
   }
}

/*
 * Write a 32-bit register using immediate data.
 */
void
brw_load_register_imm32(struct brw_context *brw, uint32_t reg, uint32_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(reg);
   OUT_BATCH(imm);
   ADVANCE_BATCH();
}

/*
 * Write a 64-bit register using immediate data.
 */
void
brw_load_register_imm64(struct brw_context *brw, uint32_t reg, uint64_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(5);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (5 - 2));
   OUT_BATCH(reg);
   OUT_BATCH(imm & 0xffffffff);
   OUT_BATCH(reg + 4);
   OUT_BATCH(imm >> 32);
   ADVANCE_BATCH();
}

/*
 * Copies a 32-bit register.
 */
void
brw_load_register_reg(struct brw_context *brw, uint32_t src, uint32_t dest)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 8 || devinfo->is_haswell);

   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src);
   OUT_BATCH(dest);
   ADVANCE_BATCH();
}

/*
 * Copies a 64-bit register.
 */
void
brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 8 || devinfo->is_haswell);

   BEGIN_BATCH(6);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src);
   OUT_BATCH(dest);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src + sizeof(uint32_t));
   OUT_BATCH(dest + sizeof(uint32_t));
   ADVANCE_BATCH();
}

/*
 * Write 32-bits of immediate data to a GPU memory buffer.
 */
void
brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
                     uint32_t offset, uint32_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(4);
   OUT_BATCH(MI_STORE_DATA_IMM | (4 - 2));
   if (devinfo->gen >= 8)
      OUT_RELOC64(bo, RELOC_WRITE, offset);
   else {
      OUT_BATCH(0); /* MBZ */
      OUT_RELOC(bo, RELOC_WRITE, offset);
   }
   OUT_BATCH(imm);
   ADVANCE_BATCH();
}

/*
 * Write 64-bits of immediate data to a GPU memory buffer.
 */
void
brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
                     uint32_t offset, uint64_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(5);
   OUT_BATCH(MI_STORE_DATA_IMM | (5 - 2));
   if (devinfo->gen >= 8)
      OUT_RELOC64(bo, RELOC_WRITE, offset);
   else {
      OUT_BATCH(0); /* MBZ */
      OUT_RELOC(bo, RELOC_WRITE, offset);
   }
   OUT_BATCH(imm & 0xffffffffu);
   OUT_BATCH(imm >> 32);
   ADVANCE_BATCH();
}