kvm: x86: Fix segment registers to make them VMX compatible
src/cpu/kvm/x86_cpu.cc
/*
 * Copyright (c) 2013 Andreas Sandberg
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include <linux/kvm.h>

#include <algorithm>
#include <cerrno>
#include <memory>

#include "arch/x86/regs/msr.hh"
#include "arch/x86/cpuid.hh"
#include "arch/x86/utility.hh"
#include "arch/registers.hh"
#include "cpu/kvm/base.hh"
#include "cpu/kvm/x86_cpu.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;

#define MSR_TSC 0x10

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

// Task segment type of an inactive 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
// Task segment type of an active 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_BUSY 11

// Non-conforming accessed code segment
#define SEG_CS_TYPE_ACCESSED 9
// Non-conforming accessed code segment that can be read
#define SEG_CS_TYPE_READ_ACCESSED 11

// The lowest bit of the type field for normal segments (code and
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1


#define FOREACH_IREG() \
    do { \
        APPLY_IREG(rax, INTREG_RAX); \
        APPLY_IREG(rbx, INTREG_RBX); \
        APPLY_IREG(rcx, INTREG_RCX); \
        APPLY_IREG(rdx, INTREG_RDX); \
        APPLY_IREG(rsi, INTREG_RSI); \
        APPLY_IREG(rdi, INTREG_RDI); \
        APPLY_IREG(rsp, INTREG_RSP); \
        APPLY_IREG(rbp, INTREG_RBP); \
        APPLY_IREG(r8, INTREG_R8); \
        APPLY_IREG(r9, INTREG_R9); \
        APPLY_IREG(r10, INTREG_R10); \
        APPLY_IREG(r11, INTREG_R11); \
        APPLY_IREG(r12, INTREG_R12); \
        APPLY_IREG(r13, INTREG_R13); \
        APPLY_IREG(r14, INTREG_R14); \
        APPLY_IREG(r15, INTREG_R15); \
    } while(0)

#define FOREACH_SREG() \
    do { \
        APPLY_SREG(cr0, MISCREG_CR0); \
        APPLY_SREG(cr2, MISCREG_CR2); \
        APPLY_SREG(cr3, MISCREG_CR3); \
        APPLY_SREG(cr4, MISCREG_CR4); \
        APPLY_SREG(cr8, MISCREG_CR8); \
        APPLY_SREG(efer, MISCREG_EFER); \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
    } while(0)

#define FOREACH_DREG() \
    do { \
        APPLY_DREG(db[0], MISCREG_DR0); \
        APPLY_DREG(db[1], MISCREG_DR1); \
        APPLY_DREG(db[2], MISCREG_DR2); \
        APPLY_DREG(db[3], MISCREG_DR3); \
        APPLY_DREG(dr6, MISCREG_DR6); \
        APPLY_DREG(dr7, MISCREG_DR7); \
    } while(0)

#define FOREACH_SEGMENT() \
    do { \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while(0)

#define FOREACH_DTABLE() \
    do { \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while(0)

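// Allocate a KVM data structure that ends in a variable-length array
// (e.g., kvm_msrs followed by its kvm_msr_entry records). The caller
// owns the returned memory and releases it with operator delete,
// typically through a std::unique_ptr.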
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}

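// The dumpKvm() overloads below pretty-print the various KVM state
// structures (registers, segments, FPU state, MSRs, etc.) using
// gem5's inform() facility. They are only used for debug output.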
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}

static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
}

static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}

static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif

static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    inform("\tfcw: 0x%x\n", fpu.fcw);
    inform("\tfsw: 0x%x\n", fpu.fsw);
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        const bool empty(!((fpu.ftwx >> i) & 0x1));
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\t%i: 0x%s%s\n", i, hex, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}

static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}

static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}

static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("KVM XSAVE:\n");

    Trace::dump((Tick)-1, "xsave.region",
                xsave.region, sizeof(xsave.region));
}

static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}

static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space; instead, it uses signed 48-bit addresses that are
    // sign-extended to 64 bits. Such addresses are known as
    // "canonical".
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000;
}

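// Sanity-check a segment register against the constraints that VMX
// imposes on guest state at VM entry. This only warns about problems;
// the actual fix-ups are applied in updateKvmStateSRegs().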
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
      case MISCREG_CS:
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL != 0.\n");
            /* FALLTHROUGH */
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}

X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params)
{
    Kvm &kvm(vm.kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();
}

X86KvmCPU::~X86KvmCPU()
{
}

void
X86KvmCPU::startup()
{
    BaseKvmCPU::startup();

    updateCPUID();

    io_req.setThreadContext(tc->contextId(), 0);

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}

void
X86KvmCPU::dump()
{
    dumpIntRegs();
    dumpFpuRegs();
    dumpSpecRegs();
    dumpDebugRegs();
    dumpXCRs();
    dumpVCpuEvents();
    dumpMSRs();
    dumpXSave();
}

void
X86KvmCPU::dumpFpuRegs() const
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}

void
X86KvmCPU::dumpIntRegs() const
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}

void
X86KvmCPU::dumpSpecRegs() const
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}

void
X86KvmCPU::dumpDebugRegs() const
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXCRs() const
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXSave() const
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpVCpuEvents() const
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}

void
X86KvmCPU::dumpMSRs() const
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm.getSupportedMSRs());
    std::unique_ptr<struct kvm_msrs> msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
            supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}

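// Copy the gem5 thread context into the KVM vCPU: integer registers,
// special registers (control, segment, and descriptor-table
// registers), FPU state, and MSRs.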
void
X86KvmCPU::updateKvmState()
{
    updateKvmStateRegs();
    updateKvmStateSRegs();
    updateKvmStateFPU();
    updateKvmStateMSRs();

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (DTRACE(KvmContext))
        dump();
}

void
X86KvmCPU::updateKvmStateRegs()
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    regs.rip = tc->instAddr();

    /* You might think that setting regs.rflags to the contents of
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}

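// Translate gem5's segment state (base, limit, selector, and
// attribute misc regs) for one segment into KVM's kvm_segment
// representation.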
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is unusable when the selector is zero. There is an
    // attr.unusable flag in gem5, but it seems unused.
    //
    // TODO: Are there corner cases where this doesn't work?
    kvm_seg.unusable = (kvm_seg.selector == 0);
}

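// Translate a descriptor table pointer (GDT or IDT base and limit)
// from gem5's misc regs into KVM's kvm_dtable representation.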
static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
}

static void
forceSegAccessed(struct kvm_segment &seg)
{
    // Intel's VMX requires that (some) usable segments are flagged as
    // 'accessed' (i.e., the lowest bit in the segment type is set)
    // when entering VMX. This wouldn't necessarily be the case even if
    // gem5 did set the access bits correctly, so we force it to one
    // in that case.
    if (!seg.unusable)
        seg.type |= SEG_TYPE_BIT_ACCESSED;
}

void
X86KvmCPU::updateKvmStateSRegs()
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode. We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}

void
X86KvmCPU::updateKvmStateFPU()
{
    warn_once("X86KvmCPU::updateKvmStateFPU not implemented\n");
}

void
X86KvmCPU::updateKvmStateMSRs()
{
    KvmMSRVector msrs;

    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}

void
X86KvmCPU::updateThreadContext()
{
    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (DTRACE(KvmContext))
        dump();

    updateThreadContextRegs();
    updateThreadContextSRegs();
    updateThreadContextFPU();
    updateThreadContextMSRs();

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
    tc->setMiscReg(MISCREG_M5_REG, 0);
}

void
X86KvmCPU::updateThreadContextRegs()
{
    struct kvm_regs regs;
    getRegisters(regs);

#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    tc->pcState(PCState(regs.rip));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}

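// Copy a segment or descriptor table register from KVM back into
// gem5's thread context. These helpers use setMiscReg (with side
// effects) so that dependent state, such as effective segment bases,
// is recomputed.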
inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
    tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
}

inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}

void
X86KvmCPU::updateThreadContextSRegs()
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);

    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

void
X86KvmCPU::updateThreadContextFPU()
{
    warn_once("X86KvmCPU::updateThreadContextFPU not implemented\n");
}

void
X86KvmCPU::updateThreadContextMSRs()
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}

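// Fetch the highest-priority pending interrupt from gem5's interrupt
// controller and inject it into the virtual CPU, either as a regular
// external interrupt or as an NMI.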
void
X86KvmCPU::deliverInterrupts()
{
    syncThreadContext();

    Fault fault(interrupts->getInterrupt(tc));
    interrupts->updateIntrInfo(tc);

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
}

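// Enter the guest through kvmRunWrapper(), first delivering any
// pending gem5 interrupt or, if KVM isn't ready for injection,
// requesting an interrupt window so the interrupt can be delivered
// later.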
Tick
X86KvmCPU::kvmRun(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (interrupts->checkInterruptsRaw()) {
        if (kvm_run.ready_for_interrupt_injection) {
            // KVM claims that it is ready for an interrupt. It might
            // be lying if we just updated rflags and disabled
            // interrupts (e.g., by doing a CPU handover). Let's sync
            // the thread context and check if there are /really/
            // interrupts that should be delivered now.
            syncThreadContext();
            if (interrupts->checkInterrupts(tc)) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");

                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

    return kvmRunWrapper(ticks);
}

Tick
X86KvmCPU::kvmRunDrain()
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        // Tell KVM to find a suitable place to deliver interrupts. This
        // should ensure that pending interrupts have been delivered and
        // things are reasonably consistent (i.e., no interrupts pending
        // in the guest).
        kvm_run.request_interrupt_window = 1;

        // Limit the run to 1 millisecond. That is hopefully enough to
        // reach an interrupt window. Otherwise, we'll just try again
        // later.
        return kvmRunWrapper(1 * SimClock::Float::ms);
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

        return kvmRunWrapper(0);
    }
}

Tick
X86KvmCPU::kvmRunWrapper(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    // Synchronize the APIC base and CR8 here since they are present
    // in the kvm_run struct, which makes the synchronization really
    // cheap.
    kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));

    tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
    tc->setMiscReg(MISCREG_CR8, kvm_run.cr8);

    return run_ticks;
}

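// Report host cycles using the virtual CPU's TSC (MSR 0x10) read
// through KVM.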
uint64_t
X86KvmCPU::getHostCycles() const
{
    return getMSR(MSR_TSC);
}

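// Handle a 32-bit IO-port access by forwarding it to a misc register
// instead of the memory system. Used for the PCI configuration
// address port, which gem5 models as MISCREG_PCI_CONFIG_ADDRESS.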
void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}

Tick
X86KvmCPU::handleKvmExitIO()
{
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        Addr pciConfigAddr(tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    io_req.setPhys(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
                   dataMasterId());

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    for (int i = 0; i < count; ++i) {
        Packet pkt(&io_req, cmd);

        pkt.dataStatic(guestData);
        delay += dataPort.sendAtomic(&pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}

Tick
X86KvmCPU::handleKvmExitIRQWindowOpen()
{
    // We don't need to do anything here since this is caught the next
    // time we execute kvmRun(). We still overload the exit event to
    // silence the warning about an unhandled exit event.
    return 0;
}

bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this by re-inserting interrupts that
    // are pending into gem5 on a drain. However, that would probably
    // be tricky to do reliably, so we'll just prevent a drain if
    // there is anything pending in the guest.
    // X86KvmCPU::kvmRunDrain() minimizes the amount of code executed
    // in the guest by requesting an interrupt window if there are
    // pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}

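// Pack the result of a gem5 CPUID query into the kvm_cpuid_entry2
// format expected by KVM_SET_CPUID2.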
static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index,
             CpuidResult &result)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = 0;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}

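// Query gem5's CPUID implementation for all basic and extended
// functions and install the result as the guest's CPUID table.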
void
X86KvmCPU::updateCPUID()
{
    Kvm::CPUIDVector m5_supported;

    /* TODO: We currently don't support any of the functions that
     * iterate through data structures in the CPU using an index. It's
     * currently not a problem since M5 doesn't expose any of them at
     * the moment.
     */

    /* Basic features */
    CpuidResult func0;
    X86ISA::doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    /* Extended features */
    CpuidResult efunc0;
    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    setCPUID(m5_supported);
}

void
X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
{
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}

void
X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
{
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}

void
X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
{
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
}


void
X86KvmCPU::setMSR(uint32_t index, uint64_t value)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = value;

    setMSRs(*kvm_msrs.get());
}

uint64_t
X86KvmCPU::getMSR(uint32_t index) const
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = 0;

    getMSRs(*kvm_msrs.get());
    return entry.data;
}

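// Compute (and cache) the set of MSRs that both KVM and gem5 know
// about, i.e., the intersection of KVM's supported MSR list and
// gem5's msrMap.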
const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm.getSupportedMSRs());

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}

void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}

void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSAVE state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSAVE state\n");
}


void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest vCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest vCPU events\n");
}

X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}