2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
25 * Helper lib to track gpu buffers contents/address, and map between gpu and
26 * host address while decoding cmdstream/crashdumps
39 /* for 'once' mode, for buffers containing cmdstream keep track per offset
40 * into buffer of which modes it has already been dumped;
/* Fixed-size pool of tracked buffers; all lookups below are linear scans
 * bounded by nbuffers.  Entry contents (hostptr) are owned by this pool.
 */
static struct buffer buffers[512];
53 buffer_contains_gpuaddr(struct buffer
*buf
, uint64_t gpuaddr
, uint32_t len
)
55 return (buf
->gpuaddr
<= gpuaddr
) && (gpuaddr
< (buf
->gpuaddr
+ buf
->len
));
59 buffer_contains_hostptr(struct buffer
*buf
, void *hostptr
)
61 return (buf
->hostptr
<= hostptr
) && (hostptr
< (buf
->hostptr
+ buf
->len
));
66 gpuaddr(void *hostptr
)
69 for (i
= 0; i
< nbuffers
; i
++)
70 if (buffer_contains_hostptr(&buffers
[i
], hostptr
))
71 return buffers
[i
].gpuaddr
+ (hostptr
- buffers
[i
].hostptr
);
76 gpubaseaddr(uint64_t gpuaddr
)
81 for (i
= 0; i
< nbuffers
; i
++)
82 if (buffer_contains_gpuaddr(&buffers
[i
], gpuaddr
, 0))
83 return buffers
[i
].gpuaddr
;
88 hostptr(uint64_t gpuaddr
)
93 for (i
= 0; i
< nbuffers
; i
++)
94 if (buffer_contains_gpuaddr(&buffers
[i
], gpuaddr
, 0))
95 return buffers
[i
].hostptr
+ (gpuaddr
- buffers
[i
].gpuaddr
);
100 hostlen(uint64_t gpuaddr
)
105 for (i
= 0; i
< nbuffers
; i
++)
106 if (buffer_contains_gpuaddr(&buffers
[i
], gpuaddr
, 0))
107 return buffers
[i
].len
+ buffers
[i
].gpuaddr
- gpuaddr
;
112 has_dumped(uint64_t gpuaddr
, unsigned enable_mask
)
117 for (int i
= 0; i
< nbuffers
; i
++) {
118 if (buffer_contains_gpuaddr(&buffers
[i
], gpuaddr
, 0)) {
119 struct buffer
*b
= &buffers
[i
];
120 assert(gpuaddr
>= b
->gpuaddr
);
121 unsigned offset
= gpuaddr
- b
->gpuaddr
;
124 while (n
< b
->noffsets
) {
125 if (offset
== b
->offsets
[n
].offset
)
130 /* if needed, allocate a new offset entry: */
131 if (n
== b
->noffsets
) {
133 assert(b
->noffsets
< ARRAY_SIZE(b
->offsets
));
134 b
->offsets
[n
].dumped_mask
= 0;
135 b
->offsets
[n
].offset
= offset
;
138 if ((b
->offsets
[n
].dumped_mask
& enable_mask
) == enable_mask
)
141 b
->offsets
[n
].dumped_mask
|= enable_mask
;
153 for (int i
= 0; i
< nbuffers
; i
++) {
154 free(buffers
[i
].hostptr
);
155 buffers
[i
].hostptr
= NULL
;
157 buffers
[i
].noffsets
= 0;
163 * Record buffer contents, takes ownership of hostptr (freed in
167 add_buffer(uint64_t gpuaddr
, unsigned int len
, void *hostptr
)
171 for (i
= 0; i
< nbuffers
; i
++) {
172 if (buffers
[i
].gpuaddr
== gpuaddr
)
177 /* some traces, like test-perf, with some blob versions,
178 * seem to generate an unreasonable # of gpu buffers (a
179 * leak?), so just ignore them.
181 if (nbuffers
>= ARRAY_SIZE(buffers
)) {
188 buffers
[i
].hostptr
= hostptr
;
189 buffers
[i
].len
= len
;
190 buffers
[i
].gpuaddr
= gpuaddr
;