/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file v3d_simulator_hw.c
 *
 * Implements the actual HW interaction between the GL driver's VC5 simulator
 * layer and the simulator.
 *
 * The register headers between V3D versions have conflicting defines, so all
 * register interactions appear in this file and are compiled once per V3D
 * version we support.
 */

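/* A minimal illustration of the per-version build scheme this relies on
 * (assuming the usual Mesa v3dX() prefixing macro, which is defined outside
 * this file): the file is compiled once per V3D_VERSION, and v3dX() gives
 * each copy of an entry point a distinct symbol, e.g.
 *
 *     v3dX(simulator_init_regs) -> v3d33_simulator_init_regs   (V3D_VERSION == 33)
 *     v3dX(simulator_init_regs) -> v3d41_simulator_init_regs   (V3D_VERSION == 41)
 *
 * so both sets of register definitions can coexist in the linked driver.
 */
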
#ifdef USE_V3D_SIMULATOR

#include "v3d_screen.h"
#include "v3d_context.h"
#include "v3d_simulator_wrapper.h"

#define HW_REGISTER_RO(x) (x)
#define HW_REGISTER_RW(x) (x)
#if V3D_VERSION >= 41
#include "libs/core/v3d/registers/4.1.34.0/v3d.h"
#else
#include "libs/core/v3d/registers/3.3.0.0/v3d.h"
#endif

#define V3D_WRITE(reg, val) v3d_hw_write_reg(v3d, reg, val)
#define V3D_READ(reg) v3d_hw_read_reg(v3d, reg)

/* Invalidates the L3 cache (only present on HW that has the GCA). */
static void
v3d_invalidate_l3(struct v3d_hw *v3d)
{
        if (!v3d_hw_has_gca(v3d))
                return;

#if V3D_VERSION < 40
        uint32_t gca_ctrl = V3D_READ(V3D_GCA_CACHE_CTRL);

        V3D_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH_SET);
        V3D_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH_SET);
#endif
}

/* Invalidates the L2C cache.  This is a read-only cache for uniforms and instructions. */
static void
v3d_invalidate_l2c(struct v3d_hw *v3d)
{
        if (V3D_VERSION >= 33)
                return;

        V3D_WRITE(V3D_CTL_0_L2CACTL,
                  V3D_CTL_0_L2CACTL_L2CCLR_SET |
                  V3D_CTL_0_L2CACTL_L2CENA_SET);
}

/* Invalidates texture L2 cachelines */
static void
v3d_invalidate_l2t(struct v3d_hw *v3d)
{
        V3D_WRITE(V3D_CTL_0_L2TFLSTA, 0);
        V3D_WRITE(V3D_CTL_0_L2TFLEND, ~0);
        V3D_WRITE(V3D_CTL_0_L2TCACTL,
                  V3D_CTL_0_L2TCACTL_L2TFLS_SET |
                  (0 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));
}

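/* Note: the L2TFLSTA/L2TFLEND writes above cover the full address range
 * (start 0, end ~0).  The L2TFLM mode field is what distinguishes this
 * helper from v3d_flush_l2t() below: mode 0 is used when invalidating,
 * mode 2 when flushing dirty lines.  That reading of the mode values is
 * inferred from how the two helpers are used here, not taken from the
 * register documentation.
 */
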
/* Flushes dirty texture cachelines from the L1 write combiner */
static void
v3d_flush_l1td(struct v3d_hw *v3d)
{
        V3D_WRITE(V3D_CTL_0_L2TCACTL,
                  V3D_CTL_0_L2TCACTL_TMUWCF_SET);

        assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
}

/* Flushes dirty texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_hw *v3d)
{
        V3D_WRITE(V3D_CTL_0_L2TFLSTA, 0);
        V3D_WRITE(V3D_CTL_0_L2TFLEND, ~0);
        V3D_WRITE(V3D_CTL_0_L2TCACTL,
                  V3D_CTL_0_L2TCACTL_L2TFLS_SET |
                  (2 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));

        assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_hw *v3d)
{
        V3D_WRITE(V3D_CTL_0_SLCACTL, ~0);
}

static void
v3d_invalidate_caches(struct v3d_hw *v3d)
{
        v3d_invalidate_l3(v3d);
        v3d_invalidate_l2c(v3d);
        v3d_invalidate_l2t(v3d);
        v3d_invalidate_slices(v3d);
}

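/* v3d_invalidate_caches() is the pre-job step: the CL and CSD submit paths
 * below call it before kicking off work, so the GPU doesn't read stale data
 * through its read-only caches.
 */
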
static uint32_t g_gmp_ofs;

static void
v3d_reload_gmp(struct v3d_hw *v3d)
{
        /* Completely reset the GMP. */
        V3D_WRITE(V3D_GMP_0_CFG,
                  V3D_GMP_0_CFG_PROTENABLE_SET);
        V3D_WRITE(V3D_GMP_0_TABLE_ADDR, g_gmp_ofs);
        V3D_WRITE(V3D_GMP_0_CLEAR_LOAD, ~0);
        while (V3D_READ(V3D_GMP_0_STATUS) &
               V3D_GMP_0_STATUS_CFG_BUSY_SET) {
                ;
        }
}

static void
v3d_flush_caches(struct v3d_hw *v3d)
{
        v3d_flush_l1td(v3d);
        v3d_flush_l2t(v3d);
}

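/* v3d_flush_caches() is the post-job counterpart: the CSD submit path calls
 * it once a dispatch has drained, so writes that may still sit in the TMU
 * write combiner or texture L2 reach memory before the ioctl returns.
 */
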
int
v3dX(simulator_submit_tfu_ioctl)(struct v3d_hw *v3d,
                                 struct drm_v3d_submit_tfu *args)
{
        int last_vtct = V3D_READ(V3D_TFU_CS) & V3D_TFU_CS_CVTCT_SET;

        V3D_WRITE(V3D_TFU_IIA, args->iia);
        V3D_WRITE(V3D_TFU_IIS, args->iis);
        V3D_WRITE(V3D_TFU_ICA, args->ica);
        V3D_WRITE(V3D_TFU_IUA, args->iua);
        V3D_WRITE(V3D_TFU_IOA, args->ioa);
        V3D_WRITE(V3D_TFU_IOS, args->ios);
        V3D_WRITE(V3D_TFU_COEF0, args->coef[0]);
        V3D_WRITE(V3D_TFU_COEF1, args->coef[1]);
        V3D_WRITE(V3D_TFU_COEF2, args->coef[2]);
        V3D_WRITE(V3D_TFU_COEF3, args->coef[3]);

        /* The ICFG write kicks off the job. */
        V3D_WRITE(V3D_TFU_ICFG, args->icfg);

        /* Wait for the conversion count field to change from the value
         * sampled above, which indicates the job has completed.
         */
        while ((V3D_READ(V3D_TFU_CS) & V3D_TFU_CS_CVTCT_SET) == last_vtct) {
                v3d_hw_tick(v3d);
        }

        return 0;
}

#if V3D_VERSION >= 41
int
v3dX(simulator_submit_csd_ioctl)(struct v3d_hw *v3d,
                                 struct drm_v3d_submit_csd *args,
                                 uint32_t gmp_ofs)
{
        g_gmp_ofs = gmp_ofs;
        v3d_reload_gmp(v3d);

        v3d_invalidate_caches(v3d);

        V3D_WRITE(V3D_CSD_0_QUEUED_CFG1, args->cfg[1]);
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG2, args->cfg[2]);
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG3, args->cfg[3]);
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG4, args->cfg[4]);
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG5, args->cfg[5]);
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG6, args->cfg[6]);
        /* CFG0 kicks off the job */
        V3D_WRITE(V3D_CSD_0_QUEUED_CFG0, args->cfg[0]);

        /* Wait for the dispatch to leave both the current and queued slots. */
        while (V3D_READ(V3D_CSD_0_STATUS) &
               (V3D_CSD_0_STATUS_HAVE_CURRENT_DISPATCH_SET |
                V3D_CSD_0_STATUS_HAVE_QUEUED_DISPATCH_SET)) {
                v3d_hw_tick(v3d);
        }

        v3d_flush_caches(v3d);

        return 0;
}
#endif

int
v3dX(simulator_get_param_ioctl)(struct v3d_hw *v3d,
                                struct drm_v3d_get_param *args)
{
        static const uint32_t reg_map[] = {
                [DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_CTL_UIFCFG,
                [DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_CTL_IDENT1,
                [DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_CTL_IDENT2,
                [DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_CTL_IDENT3,
                [DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_0_IDENT0,
                [DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_0_IDENT1,
                [DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_0_IDENT2,
        };

        switch (args->param) {
        case DRM_V3D_PARAM_SUPPORTS_TFU:
                args->value = 1;
                return 0;
        case DRM_V3D_PARAM_SUPPORTS_CSD:
                args->value = V3D_VERSION >= 41;
                return 0;
        }

        if (args->param < ARRAY_SIZE(reg_map) && reg_map[args->param]) {
                args->value = V3D_READ(reg_map[args->param]);
                return 0;
        }

        fprintf(stderr, "Unknown DRM_IOCTL_VC5_GET_PARAM(%lld)\n",
                (long long)args->param);
        return -1;
}

static struct v3d_hw *v3d_isr_hw;

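/* Interrupt handler registered with the simulator in simulator_init_regs()
 * below.  It services the core-0 interrupts we leave unmasked there:
 * out-of-memory during binning (by pointing the PTB at freshly allocated
 * spill memory) and GMP violations.
 */
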
static void
v3d_isr(uint32_t hub_status)
{
        struct v3d_hw *v3d = v3d_isr_hw;

        /* Check the per-core bits */
        if (hub_status & (1 << 0)) {
                uint32_t core_status = V3D_READ(V3D_CTL_0_INT_STS);
                V3D_WRITE(V3D_CTL_0_INT_CLR, core_status);

                if (core_status & V3D_CTL_0_INT_STS_INT_OUTOMEM_SET) {
                        /* Binning ran out of memory: hand the PTB a fresh
                         * chunk of spill memory and let the job continue.
                         */
                        uint32_t size = 256 * 1024;
                        uint32_t offset = v3d_simulator_get_spill(size);

                        v3d_reload_gmp(v3d);

                        V3D_WRITE(V3D_PTB_0_BPOA, offset);
                        V3D_WRITE(V3D_PTB_0_BPOS, size);
                        return;
                }

                if (core_status & V3D_CTL_0_INT_STS_INT_GMPV_SET) {
                        fprintf(stderr, "GMP violation at 0x%08x\n",
                                V3D_READ(V3D_GMP_0_VIO_ADDR));
                } else {
                        fprintf(stderr,
                                "Unexpected ISR with core status 0x%08x\n",
                                core_status);
                }
                abort();
        }
}

void
v3dX(simulator_init_regs)(struct v3d_hw *v3d)
{
#if V3D_VERSION == 33
        /* Set OVRTMUOUT to match kernel behavior.
         *
         * This means that the texture sampler uniform configuration's tmu
         * output type field is used, instead of using the hardware default
         * behavior based on the texture type.  If you want the default
         * behavior, you can still put "2" in the indirect texture state's
         * output_type field.
         */
        V3D_WRITE(V3D_CTL_0_MISCCFG, V3D_CTL_1_MISCCFG_OVRTMUOUT_SET);
#endif

        /* Mask all core interrupts other than the ones the ISR handles
         * (GMP violations and out-of-memory during binning).
         */
        uint32_t core_interrupts = (V3D_CTL_0_INT_STS_INT_GMPV_SET |
                                    V3D_CTL_0_INT_STS_INT_OUTOMEM_SET);
        V3D_WRITE(V3D_CTL_0_INT_MSK_SET, ~core_interrupts);
        V3D_WRITE(V3D_CTL_0_INT_MSK_CLR, core_interrupts);

        v3d_isr_hw = v3d;
        v3d_hw_set_isr(v3d, v3d_isr);
}

void
v3dX(simulator_submit_cl_ioctl)(struct v3d_hw *v3d,
                                struct drm_v3d_submit_cl *submit,
                                uint32_t gmp_ofs)
{
        g_gmp_ofs = gmp_ofs;
        v3d_reload_gmp(v3d);

        v3d_invalidate_caches(v3d);

        if (submit->qma) {
                V3D_WRITE(V3D_CLE_0_CT0QMA, submit->qma);
                V3D_WRITE(V3D_CLE_0_CT0QMS, submit->qms);
        }
#if V3D_VERSION >= 41
        if (submit->qts) {
                V3D_WRITE(V3D_CLE_0_CT0QTS,
                          V3D_CLE_0_CT0QTS_CTQTSEN_SET |
                          submit->qts);
        }
#endif
        V3D_WRITE(V3D_CLE_0_CT0QBA, submit->bcl_start);
        V3D_WRITE(V3D_CLE_0_CT0QEA, submit->bcl_end);

        /* Wait for bin to complete before firing render.  The kernel's
         * scheduler implements this using the GPU scheduler blocking on the
         * bin fence completing.  (We don't use HW semaphores).
         */
        while (V3D_READ(V3D_CLE_0_CT0CA) !=
               V3D_READ(V3D_CLE_0_CT0EA)) {
                v3d_hw_tick(v3d);
        }

        v3d_invalidate_caches(v3d);

        V3D_WRITE(V3D_CLE_0_CT1QBA, submit->rcl_start);
        V3D_WRITE(V3D_CLE_0_CT1QEA, submit->rcl_end);

        /* Wait for the render job to complete. */
        while (V3D_READ(V3D_CLE_0_CT1CA) !=
               V3D_READ(V3D_CLE_0_CT1EA) ||
               V3D_READ(V3D_CLE_1_CT1CA) !=
               V3D_READ(V3D_CLE_1_CT1EA)) {
                v3d_hw_tick(v3d);
        }
}

#endif /* USE_V3D_SIMULATOR */