Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / intel / dev / gen_device_info.h
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef GEN_DEVICE_INFO_H
#define GEN_DEVICE_INFO_H

#include <stdbool.h>
#include <stdint.h>

#include "util/macros.h"

#ifdef __cplusplus
extern "C" {
#endif

struct drm_i915_query_topology_info;

#define GEN_DEVICE_MAX_SLICES           (6)  /* Maximum on gen10 */
#define GEN_DEVICE_MAX_SUBSLICES        (8)  /* Maximum on gen11 */
#define GEN_DEVICE_MAX_EUS_PER_SUBSLICE (10) /* Maximum on Haswell */
#define GEN_DEVICE_MAX_PIXEL_PIPES      (2)  /* Maximum on gen11 */

/**
 * Intel hardware information and quirks
 */
struct gen_device_info
{
   int gen; /**< Generation number: 4, 5, 6, 7, ... */
   int revision;
   int gt;

   bool is_g4x;
   bool is_ivybridge;
   bool is_baytrail;
   bool is_haswell;
   bool is_broadwell;
   bool is_cherryview;
   bool is_skylake;
   bool is_broxton;
   bool is_kabylake;
   bool is_geminilake;
   bool is_coffeelake;
   bool is_cannonlake;
   bool is_elkhartlake;
   bool is_dg1;

   bool has_hiz_and_separate_stencil;
   bool must_use_separate_stencil;
   bool has_sample_with_hiz;
   bool has_llc;

   bool has_pln;
   bool has_64bit_float;
   bool has_64bit_int;
   bool has_integer_dword_mul;
   bool has_compr4;
   bool has_surface_tile_offset;
   bool supports_simd16_3src;
   bool has_resource_streamer;
   bool disable_ccs_repack;
   bool has_aux_map;
   bool has_tiling_uapi;

   /**
    * \name Intel hardware quirks
    *  @{
    */
   bool has_negative_rhw_bug;

   /**
    * Some versions of Gen hardware don't do centroid interpolation correctly
    * on unlit pixels, causing incorrect values for derivatives near triangle
    * edges.  Enabling this flag causes the fragment shader to use
    * non-centroid interpolation for unlit pixels, at the expense of two extra
    * fragment shader instructions.
    */
   bool needs_unlit_centroid_workaround;
   /** @} */

   /**
    * \name GPU hardware limits
    *
    * In general, you can find shader thread maximums by looking at the "Maximum
    * Number of Threads" field in the Intel PRM description of the 3DSTATE_VS,
    * 3DSTATE_GS, 3DSTATE_HS, 3DSTATE_DS, and 3DSTATE_PS commands. URB entry
    * limits come from the "Number of URB Entries" field in the
    * 3DSTATE_URB_VS command and friends.
    *
    * These fields are used to calculate the scratch space to allocate.  The
    * amount of scratch space can be larger without being harmful on modern
    * GPUs; however, prior to Haswell, programming the maximum number of threads
    * to greater than the hardware maximum would cause GPU performance to tank.
    *
    *  @{
    */
   /**
    * Total number of slices present on the device whether or not they've been
    * fused off.
    *
    * XXX: CS thread counts are limited by the inability to do cross subslice
    * communication. It is effectively the number of logical threads which
    * can be executed in a subslice. Fuse configurations may cause this number
    * to change, so we program @max_cs_threads as the lower maximum.
    */
   unsigned num_slices;

   /**
    * Number of subslices for each slice (used to be uniform until CNL).
    */
   unsigned num_subslices[GEN_DEVICE_MAX_SUBSLICES];

   /**
    * Number of subslices on each pixel pipe (ICL).
    */
   unsigned ppipe_subslices[GEN_DEVICE_MAX_PIXEL_PIPES];

   /**
    * Upper bound of the number of EUs per subslice (some SKUs might have just
    * 1 EU fused across all subslices, like 47 EUs, in which case this number
    * won't be accurate for one subslice).
    */
   unsigned num_eu_per_subslice;

   /**
    * Number of threads per EU; varies between 4 and 8 across generations.
    */
   unsigned num_thread_per_eu;

   /**
    * A bit mask of the slices available.
    */
   uint8_t slice_masks;

   /**
    * An array of bit masks of the subslices available; use
    * subslice_slice_stride to access this array.
    */
   uint8_t subslice_masks[GEN_DEVICE_MAX_SLICES *
                          DIV_ROUND_UP(GEN_DEVICE_MAX_SUBSLICES, 8)];

   /**
    * An array of bit masks of the EUs available; use eu_slice_stride &
    * eu_subslice_stride to access this array.
    */
   uint8_t eu_masks[GEN_DEVICE_MAX_SLICES *
                    GEN_DEVICE_MAX_SUBSLICES *
                    DIV_ROUND_UP(GEN_DEVICE_MAX_EUS_PER_SUBSLICE, 8)];

   /**
    * Stride to access subslice_masks[].
    */
   uint16_t subslice_slice_stride;

   /**
    * Strides to access eu_masks[].
    */
   uint16_t eu_slice_stride;
   uint16_t eu_subslice_stride;

   unsigned l3_banks;
   unsigned max_vs_threads;   /**< Maximum Vertex Shader threads */
   unsigned max_tcs_threads;  /**< Maximum Hull Shader threads */
   unsigned max_tes_threads;  /**< Maximum Domain Shader threads */
   unsigned max_gs_threads;   /**< Maximum Geometry Shader threads. */
   /**
    * Theoretical maximum number of Pixel Shader threads.
    *
    * PSD means Pixel Shader Dispatcher. On modern Intel GPUs, hardware will
    * automatically scale pixel shader thread count, based on a single value
    * programmed into 3DSTATE_PS.
    *
    * To calculate the maximum number of threads for Gen8 and beyond (which
    * have multiple Pixel Shader Dispatchers):
    *
    *  - Look up 3DSTATE_PS and find "Maximum Number of Threads Per PSD"
    *  - Usually there's only one PSD per subslice, so use the number of
    *    subslices for number of PSDs.
    *  - For max_wm_threads, the total should be PSD threads * #PSDs.
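    *
    * For instance (hypothetical numbers, not taken from any specific SKU):
    * a part with 64 threads per PSD and 3 subslices would be programmed
    * with max_wm_threads = 64 * 3 = 192.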
    */
   unsigned max_wm_threads;

   /**
    * Maximum Compute Shader threads.
    *
    * Thread count * number of EUs per subslice
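    *
    * For example (hypothetical configuration, not a specific SKU): 7 threads
    * per EU and 8 EUs per subslice would give max_cs_threads = 7 * 8 = 56.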
    */
   unsigned max_cs_threads;

   struct {
      /**
       * Fixed size of the URB.
       *
       * On Gen6 and DG1, this is measured in KB.  Gen4-5 instead measure
       * this in 512b blocks, as that's more convenient there.
       *
       * On most Gen7+ platforms, the URB is a section of the L3 cache,
       * and can be resized based on the L3 programming.  For those platforms,
       * simply leave this field blank (zero) - it isn't used.
       */
      unsigned size;

      /**
       * The minimum number of URB entries.  See the 3DSTATE_URB_<XS> docs.
       */
      unsigned min_entries[4];

      /**
       * The maximum number of URB entries.  See the 3DSTATE_URB_<XS> docs.
       */
      unsigned max_entries[4];
   } urb;

   /**
    * For the longest time the timestamp frequency for Gen's timestamp counter
    * could be assumed to be 12.5MHz, where the least significant bit neatly
    * corresponded to 80 nanoseconds.
    *
    * Since Gen9 the numbers aren't so round, with a frequency of 12MHz for
    * SKL (or scale factor of 83.33333333) and a frequency of 19200000Hz for
    * BXT.
    *
    * For simplicity, to fit with the current code that scales by a single
    * constant to map from raw timestamps to nanoseconds, we now do the
    * conversion in floating point instead of integer arithmetic.
    *
    * In general it's probably worth noting that the documented constants we
    * have for the per-platform timestamp frequencies aren't perfect and
    * shouldn't be trusted for scaling and comparing timestamps with a large
    * delta.
    *
    * E.g. with crude testing on my system using the 'correct' scale factor I'm
    * seeing a drift of ~2 milliseconds per second.
    */
   uint64_t timestamp_frequency;

   uint64_t aperture_bytes;

   /**
    * ID to put into the .aub files.
    */
   int simulator_id;

   /**
    * Holds the PCI device id.
    */
   uint32_t chipset_id;

   /**
    * no_hw is true when the chipset_id PCI device id has been overridden.
    */
   bool no_hw;
   /** @} */
};

#define gen_device_info_is_9lp(devinfo) \
   ((devinfo)->is_broxton || (devinfo)->is_geminilake)

static inline bool
gen_device_info_subslice_available(const struct gen_device_info *devinfo,
                                   int slice, int subslice)
{
   return (devinfo->subslice_masks[slice * devinfo->subslice_slice_stride +
                                   subslice / 8] & (1U << (subslice % 8))) != 0;
}
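
/* A minimal sketch (not part of the upstream header) showing how
 * eu_slice_stride and eu_subslice_stride are meant to index eu_masks[],
 * mirroring gen_device_info_subslice_available() above.  The helper name
 * is illustrative only.
 */
static inline bool
gen_device_info_eu_available_example(const struct gen_device_info *devinfo,
                                     int slice, int subslice, int eu)
{
   /* Offset of this subslice's EU mask bytes within eu_masks[]. */
   unsigned subslice_offset = slice * devinfo->eu_slice_stride +
                              subslice * devinfo->eu_subslice_stride;

   /* Each byte packs 8 EUs; test the bit for the requested EU. */
   return (devinfo->eu_masks[subslice_offset + eu / 8] &
           (1U << (eu % 8))) != 0;
}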

int gen_device_name_to_pci_device_id(const char *name);
const char *gen_get_device_name(int devid);

static inline uint64_t
gen_device_info_timebase_scale(const struct gen_device_info *devinfo,
                               uint64_t gpu_timestamp)
{
   return (1000000000ull * gpu_timestamp) / devinfo->timestamp_frequency;
}
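
/* Usage sketch (devinfo, end_ts and start_ts are hypothetical variables):
 * with the historical 12.5MHz timestamp frequency described above, a raw
 * delta of 125 ticks maps to (1000000000 * 125) / 12500000 = 10000
 * nanoseconds, i.e. 80ns per tick.
 *
 *    uint64_t ns = gen_device_info_timebase_scale(devinfo, end_ts - start_ts);
 */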

bool gen_get_device_info_from_fd(int fh, struct gen_device_info *devinfo);
bool gen_get_device_info_from_pci_id(int pci_id,
                                     struct gen_device_info *devinfo);
int gen_get_aperture_size(int fd, uint64_t *size);

#ifdef __cplusplus
}
#endif

#endif /* GEN_DEVICE_INFO_H */