/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <assert.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

#include "amdgpu_id.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"

struct radv_amdgpu_cs {
	struct radeon_winsys_cs base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	amdgpu_bo_handle *handles;
	uint8_t *priorities;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	bool is_chained;

	int buffer_hash_table[1024];
	unsigned hw_ip;
};

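/* ib_size_ptr points at the dword that holds the running size of the IB
 * currently being recorded: initially &ib.size, and after each grow it points
 * into the previous IB's trailing INDIRECT_BUFFER packet, so the size field
 * (and the chain bit) can be patched once the final dword count is known. */
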
static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
	return (struct radv_amdgpu_cs *)base;
}

static int ring_to_hw_ip(enum ring_type ring)
{
	switch (ring) {
	case RING_GFX:
		return AMDGPU_HW_IP_GFX;
	case RING_DMA:
		return AMDGPU_HW_IP_DMA;
	case RING_COMPUTE:
		return AMDGPU_HW_IP_COMPUTE;
	default:
		unreachable("unsupported ring");
	}
}

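/* An amdgpu fence is not a standalone kernel object; it is the tuple
 * (context, ip_type, ip_instance, ring, seq_no) identifying a submission,
 * which is exactly what amdgpu_cs_query_fence_status() waits on. */
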
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
					 struct amdgpu_cs_fence *fence,
					 struct amdgpu_cs_request *req)
{
	fence->context = ctx->ctx;
	fence->ip_type = req->ip_type;
	fence->ip_instance = req->ip_instance;
	fence->ring = req->ring;
	fence->fence = req->seq_no;
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
	struct amdgpu_cs_fence *fence = calloc(1, sizeof(struct amdgpu_cs_fence));
	return (struct radeon_winsys_fence *)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	free(fence);
}

static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
				   struct radeon_winsys_fence *_fence,
				   bool absolute,
				   uint64_t timeout)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	/* Now use the libdrm query. */
	r = amdgpu_cs_query_fence_status(fence,
					 timeout,
					 flags,
					 &expired);
	if (r) {
		fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	return expired != 0;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	free(cs->old_ib_buffers);
	free(cs->handles);
	free(cs->priorities);
	free(cs);
}

static boolean radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
				   enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	cs->hw_ip = ring_to_hw_ip(ring_type);
	return true;
}

static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
		      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, ring_type);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
						  RADEON_DOMAIN_GTT,
						  RADEON_FLAG_CPU_ACCESS);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
	} else {
		cs->base.buf = malloc(16384);
		cs->base.max_dw = 4096;
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
	}

	return &cs->base;
}

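/* With use_ib_bos, commands are recorded directly into a GPU-visible IB and
 * max_dw leaves 4 dwords of headroom for the INDIRECT_BUFFER chain packet
 * that radv_amdgpu_cs_grow() appends. Without it, commands go to a malloc'ed
 * shadow buffer that is copied into a bounce buffer at submit time (see
 * radv_amdgpu_winsys_cs_submit_sysmem). */
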
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (!cs->ws->use_ib_bos) {
		const uint64_t limit_dws = 0xffff8;
		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
				       MIN2(cs->base.max_dw * 2, limit_dws));

		/* The total ib size cannot exceed limit_dws dwords. */
		if (ib_dws > limit_dws) {
			fprintf(stderr, "amdgpu: Too high number of "
					"dwords in the CS.\n");
			abort();
		}

		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
		if (!new_buf) {
			fprintf(stderr, "amdgpu: Not enough memory for the CS.\n");
			abort();
		}
		cs->base.buf = new_buf;
		cs->base.max_dw = ib_dws;
		return;
	}

	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	/* Pad with NOPs until the dword count is 4 short of a multiple of 8,
	 * so the IB ends 8-dword aligned after the 4-dword chain packet. */
	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		cs->base.buf[cs->base.cdw++] = 0xffff1000;

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
					     cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS);
	if (!cs->ib_buffer) {
		/* Allocation failed: fall back to the previous IB. */
		cs->base.cdw = 0;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}

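/* Finalization mirrors the grow path: the IB is padded with NOPs to a full
 * multiple of 8 dwords and the final dword count is OR'ed into the size
 * field that cs_create/cs_grow left behind via ib_size_ptr. */
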
static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			cs->base.buf[cs->base.cdw++] = 0xffff1000;

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return true;
}

static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	cs->base.cdw = 0;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	}
}

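/* Residency tracking: buffer_hash_table caches the index of a BO handle in
 * handles[] under the key ((uintptr_t)handle >> 6) & 1023, with -1 marking an
 * empty slot. A stale slot is only a cache miss, not an error: the lookup
 * below falls back to a linear scan and repairs the slot. */
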
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
				      amdgpu_bo_handle bo)
{
	unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index] == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i] == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
					       amdgpu_bo_handle bo,
					       uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1) {
		cs->priorities[index] = MAX2(cs->priorities[index], priority);
		return;
	}

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
		cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers] = bo;
	cs->priorities[cs->num_buffers] = priority;

	hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
				      struct radeon_winsys_bo *_bo,
				      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
					     struct radeon_winsys_cs *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
						   child->priorities[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
		parent->base.buf[parent->base.cdw++] = child->ib.size;
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}

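/* The kernel requires every BO referenced by a submission to be listed in an
 * amdgpu_bo_list. Three strategies are used below: the global list of all
 * BOs when debug_all_bos is set, the single CS's own list when possible, and
 * an O(n*m) merge that deduplicates handles across CSes otherwise. */
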
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
				      struct radeon_winsys_cs **cs_array,
				      unsigned count,
				      struct radv_amdgpu_winsys_bo *extra_bo,
				      amdgpu_bo_list_handle *bo_list)
{
	int r;

	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		amdgpu_bo_handle *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num++] = bo->bo;
		}

		r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
					  handles, NULL, bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !extra_bo) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[0];
		r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
					  cs->priorities, bo_list);
	} else {
		unsigned total_buffer_count = !!extra_bo;
		unsigned unique_bo_count = !!extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[i];
			total_buffer_count += cs->num_buffers;
		}

		amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
		uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
		if (!handles || !priorities) {
			free(handles);
			free(priorities);
			return -ENOMEM;
		}

		if (extra_bo) {
			handles[0] = extra_bo->bo;
			priorities[0] = 8;
		}

		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[i];
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_count; ++k) {
					if (handles[k] == cs->handles[j]) {
						found = true;
						priorities[k] = MAX2(priorities[k],
						                     cs->priorities[j]);
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					priorities[unique_bo_count] = cs->priorities[j];
					++unique_bo_count;
				}
			}
		}

		r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
					  priorities, bo_list);

		free(handles);
		free(priorities);
	}

	return r;
}

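/* last_submission[ip_type][ring] remembers the fence of the most recent
 * submission per queue so that ctx_wait_idle can wait on it later. */
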
static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
				    struct amdgpu_cs_request *request)
{
	radv_amdgpu_request_to_fence(ctx,
				     &ctx->last_submission[request->ip_type][request->ring],
				     request);
}

static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
						int queue_idx,
						struct radeon_winsys_cs **cs_array,
						unsigned cs_count,
						struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request = {0};

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, &bo_list);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
		return r;
	}

	request.ip_type = cs0->hw_ip;
	request.ring = queue_idx;
	request.number_of_ibs = 1;
	request.ibs = &cs0->ib;
	request.resources = bo_list;

	r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
	}

	amdgpu_bo_list_destroy(bo_list);

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return r;
}

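/* The fallback path submits the IBs as-is, batching at most
 * AMDGPU_CS_MAX_IBS_PER_SUBMIT of them per amdgpu_cs_submit() call and
 * un-chaining any CS that was previously patched by the chained path. */
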
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
						 int queue_idx,
						 struct radeon_winsys_cs **cs_array,
						 unsigned cs_count,
						 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
		struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
		unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT, cs_count - i);

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = cnt;
		request.ibs = ibs;

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);

			/* Un-chain before copying, so the copied ib.size does
			 * not include the 4-dword chain packet. */
			if (cs->is_chained) {
				*cs->ib_size_ptr -= 4;
				cs->is_chained = false;
			}

			ibs[j] = cs->ib;
		}

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
						"see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		if (r)
			return r;

		i += cnt;
	}

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

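/* The sysmem path copies the malloc'ed command buffers into a fresh GTT
 * bounce buffer and submits that as a single IB, padding the total size to a
 * multiple of 8 dwords. SI uses the type-2 NOP 0x80000000 as the pad word;
 * later chips use the 0xffff1000 filler NOP. */
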
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
					       int queue_idx,
					       struct radeon_winsys_cs **cs_array,
					       unsigned cs_count,
					       struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys *)cs0->ws;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	uint32_t pad_word = 0xffff1000U;

	if (radv_amdgpu_winsys(ws)->family == FAMILY_SI)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info ib = {0};
		struct radeon_winsys_bo *bo = NULL;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;

		while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
			size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
			++cnt;
		}

		assert(cnt);

		bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
		ptr = ws->buffer_map(bo);

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
			ptr += cs->base.cdw;
		}

		while (!size || (size & 7)) {
			*ptr++ = pad_word;
			++size;
		}

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
					       (struct radv_amdgpu_winsys_bo *)bo, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		ib.size = size;
		ib.ib_mc_address = ws->buffer_get_va(bo);

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = 1;
		request.ibs = &ib;

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
						"see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		ws->buffer_destroy(bo);

		i += cnt;
	}

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

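/* Submission strategy: without IB BOs everything goes through the sysmem
 * bounce-buffer path. The chained path is currently disabled by the
 * "&& false" below, so IB-BO submissions always take the fallback path,
 * which submits up to AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs per request. */
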
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
					int queue_idx,
					struct radeon_winsys_cs **cs_array,
					unsigned cs_count,
					bool can_patch,
					struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);

	if (!cs->ws->use_ib_bos) {
		return radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
							   cs_count, _fence);
	} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && false) {
		return radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
							    cs_count, _fence);
	} else {
		return radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
							     cs_count, _fence);
	}
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	int r;

	if (!ctx)
		return NULL;

	r = amdgpu_cs_ctx_create(ws->dev, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create failed. (%i)\n", r);
		goto error_create;
	}

	return (struct radeon_winsys_ctx *)ctx;

error_create:
	FREE(ctx);
	return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
				      enum ring_type ring_type, int ring_index)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	int ip_type = ring_to_hw_ip(ring_type);

	if (ctx->last_submission[ip_type][ring_index].fence) {
		uint32_t expired;
		int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index],
						       1000000000ull, 0, &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

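/* Wire the entry points into the winsys vtable; the driver core reaches
 * everything above only through these radeon_winsys hooks. */
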
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
}