/*
 * Copyright (c) 2013 Andreas Sandberg
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */
#include <linux/kvm.h>

#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <memory>

#include "arch/x86/regs/msr.hh"
#include "arch/x86/cpuid.hh"
#include "arch/x86/utility.hh"
#include "arch/registers.hh"
#include "cpu/kvm/base.hh"
#include "cpu/kvm/x86_cpu.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"
using namespace X86ISA;
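// Port addresses used by the PCI configuration space access
// mechanism: a 32-bit address register at 0xCF8 and a data window at
// 0xCFC-0xCFF. Accesses to these ports are intercepted in
// handleKvmExitIO() below and re-routed to gem5's PCI configuration
// space.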
#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC
// Task segment type of an inactive 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
// Task segment type of an active 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_BUSY 11

// Non-conforming accessed code segment
#define SEG_CS_TYPE_ACCESSED 9
// Non-conforming accessed code segment that can be read
#define SEG_CS_TYPE_READ_ACCESSED 11

// The lowest bit of the type field for normal segments (code and
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1
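// Note: VMX refuses to enter a guest unless usable code and data
// segments have the accessed bit set; forceSegAccessed() below forces
// the bit to one before the segment state is handed to KVM.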
    uint64_t reserved[12];
};

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
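// The FXSave structure mirrors the 512-byte memory image written by
// the hardware FXSAVE instruction, which is also the layout KVM uses
// for the legacy FPU/SSE state and for the start of the XSAVE area.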
#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, INTREG_RAX);            \
        APPLY_IREG(rbx, INTREG_RBX);            \
        APPLY_IREG(rcx, INTREG_RCX);            \
        APPLY_IREG(rdx, INTREG_RDX);            \
        APPLY_IREG(rsi, INTREG_RSI);            \
        APPLY_IREG(rdi, INTREG_RDI);            \
        APPLY_IREG(rsp, INTREG_RSP);            \
        APPLY_IREG(rbp, INTREG_RBP);            \
        APPLY_IREG(r8, INTREG_R8);              \
        APPLY_IREG(r9, INTREG_R9);              \
        APPLY_IREG(r10, INTREG_R10);            \
        APPLY_IREG(r11, INTREG_R11);            \
        APPLY_IREG(r12, INTREG_R12);            \
        APPLY_IREG(r13, INTREG_R13);            \
        APPLY_IREG(r14, INTREG_R14);            \
        APPLY_IREG(r15, INTREG_R15);            \
    } while (0)
#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, MISCREG_CR0);                   \
        APPLY_SREG(cr2, MISCREG_CR2);                   \
        APPLY_SREG(cr3, MISCREG_CR3);                   \
        APPLY_SREG(cr4, MISCREG_CR4);                   \
        APPLY_SREG(cr8, MISCREG_CR8);                   \
        APPLY_SREG(efer, MISCREG_EFER);                 \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE);       \
    } while (0)
#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], MISCREG_DR0);         \
        APPLY_DREG(db[1], MISCREG_DR1);         \
        APPLY_DREG(db[2], MISCREG_DR2);         \
        APPLY_DREG(db[3], MISCREG_DR3);         \
        APPLY_DREG(dr6, MISCREG_DR6);           \
        APPLY_DREG(dr7, MISCREG_DR7);           \
    } while (0)
#define FOREACH_SEGMENT()                                       \
    do {                                                        \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)
#define FOREACH_DTABLE()                                        \
    do {                                                        \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE);  \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)
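// Helper for allocating the variable-length structures used by the
// KVM API (a fixed header directly followed by a variable number of
// entries, e.g., struct kvm_msrs followed by kvm_msr_entry records).
// An illustrative use, allocating room for two MSR entries:
//
//   std::unique_ptr<struct kvm_msrs> msrs(
//       newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(2));
//   msrs->nmsrs = 2;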
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, "
           "unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
           seg.unusable);
}
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx) \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg) \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // Bits 11-13 of the x87 status word hold the top-of-stack pointer.
    const unsigned top((fpu.fsw >> 11) & 0x7);

    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : "");

    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}
static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}
static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}
static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr, regs.xcrs[i].value);
    }
}
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space, instead it uses signed 48 bit addresses that are
    // sign-extended to 64 bits. Such addresses are known as
    // "canonical".
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
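// For reference: with 48-bit virtual addresses, 0x00007fffffffffff
// (the top of the lower half) and 0xffff800000000000 (the bottom of
// the upper half) are both canonical, while 0x0000800000000000 is not.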
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
      case MISCREG_CS:
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type field
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            /* FALLTHROUGH */
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the S flag
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Check the P flag
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
      case MISCREG_CS:
      case MISCREG_TR:
        if (!seg.present)
            warn("%s: P flag not set\n", name);
        break;
    }

    // Check the limit/granularity combination
    if (((seg.limit & 0xFFF) == 0 && seg.g) ||
        ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
        warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
             name, seg.limit, seg.g);
    }
}
X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params),
      useXSave(params->useXSave)
{
    Kvm &kvm(vm.kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might "
             "be unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
X86KvmCPU::~X86KvmCPU()
{
}

void
X86KvmCPU::startup()
{
    BaseKvmCPU::startup();

    updateCPUID();

    io_req.setThreadContext(tc->contextId(), 0);

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}
void
X86KvmCPU::dumpFpuRegs() const
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}

void
X86KvmCPU::dumpIntRegs() const
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}

void
X86KvmCPU::dumpSpecRegs() const
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}

void
X86KvmCPU::dumpDebugRegs() const
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXCRs() const
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXSave() const
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpVCpuEvents() const
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}
void
X86KvmCPU::dumpMSRs() const
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm.getSupportedMSRs());
    std::unique_ptr<struct kvm_msrs> msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
            supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}
void
X86KvmCPU::updateKvmState()
{
    updateKvmStateRegs();
    updateKvmStateSRegs();
    updateKvmStateFPU();
    updateKvmStateMSRs();

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (DTRACE(KvmContext))
        dump();
}
void
X86KvmCPU::updateKvmStateRegs()
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    regs.rip = tc->instAddr();

    /* You might think that setting regs.rflags to the contents of
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is an attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}
static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
}
static void
forceSegAccessed(struct kvm_segment &seg)
{
    // Intel's VMX requires that (some) usable segments are flagged as
    // 'accessed' (i.e., the lowest bit in the segment type is set)
    // when entering VMX. This wouldn't necessarily be the case even if
    // gem5 did set the access bits correctly, so we force it to one
    // if the segment is usable.
    if (!seg.unusable)
        seg.type |= SEG_TYPE_BIT_ACCESSED;
}
void
X86KvmCPU::updateKvmStateSRegs()
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode. We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}
template <typename T>
static void
updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
{
    static_assert(sizeof(X86ISA::FloatRegBits) == 8,
                  "Unexpected size of X86ISA::FloatRegBits");

    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // the FSW with side effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(tc->readFloatReg(FLOATREG_FPR(reg_idx)));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    for (int i = 0; i < 16; ++i) {
        *(X86ISA::FloatRegBits *)&fpu.xmm[i][0] =
            tc->readFloatRegBits(FLOATREG_XMM_LOW(i));
        *(X86ISA::FloatRegBits *)&fpu.xmm[i][8] =
            tc->readFloatRegBits(FLOATREG_XMM_HIGH(i));
    }
}
void
X86KvmCPU::updateKvmStateFPULegacy()
{
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));

    updateKvmStateFPUCommon(tc, fpu);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}
void
X86KvmCPU::updateKvmStateFPUXSave()
{
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There are some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}
void
X86KvmCPU::updateKvmStateFPU()
{
    if (useXSave)
        updateKvmStateFPUXSave();
    else
        updateKvmStateFPULegacy();
}
void
X86KvmCPU::updateKvmStateMSRs()
{
    KvmMSRVector msrs;

    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}
void
X86KvmCPU::updateThreadContext()
{
    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (DTRACE(KvmContext))
        dump();

    updateThreadContextRegs();
    updateThreadContextSRegs();
    if (useXSave)
        updateThreadContextXSave();
    else
        updateThreadContextFPU();
    updateThreadContextMSRs();

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
    tc->setMiscReg(MISCREG_M5_REG, 0);
}
void
X86KvmCPU::updateThreadContextRegs()
{
    struct kvm_regs regs;
    getRegisters(regs);

#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)
    FOREACH_IREG();
#undef APPLY_IREG

    tc->pcState(PCState(regs.rip));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
static inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
    tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
}
static inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}
void
X86KvmCPU::updateThreadContextSRegs()
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);

    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
template<typename T>
static void
updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);

    static_assert(sizeof(X86ISA::FloatRegBits) == 8,
                  "Unexpected size of X86ISA::FloatRegBits");

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), value);
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_X87_TOP, top);
    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    for (int i = 0; i < 16; ++i) {
        tc->setFloatRegBits(FLOATREG_XMM_LOW(i),
                            *(X86ISA::FloatRegBits *)&fpu.xmm[i][0]);
        tc->setFloatRegBits(FLOATREG_XMM_HIGH(i),
                            *(X86ISA::FloatRegBits *)&fpu.xmm[i][8]);
    }
}
void
X86KvmCPU::updateThreadContextFPU()
{
    struct kvm_fpu fpu;
    getFPUState(fpu);

    updateThreadContextFPUCommon(tc, fpu);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}
void
X86KvmCPU::updateThreadContextXSave()
{
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);
    getXSave(kxsave);

    updateThreadContextFPUCommon(tc, xsave);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}
void
X86KvmCPU::updateThreadContextMSRs()
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}
void
X86KvmCPU::deliverInterrupts()
{
    syncThreadContext();

    Fault fault(interrupts->getInterrupt(tc));
    interrupts->updateIntrInfo(tc);

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
}
Tick
X86KvmCPU::kvmRun(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (interrupts->checkInterruptsRaw()) {
        if (kvm_run.ready_for_interrupt_injection) {
            // KVM claims that it is ready for an interrupt. It might
            // be lying if we just updated rflags and disabled
            // interrupts (e.g., by doing a CPU handover). Let's sync
            // the thread context and check if there are /really/
            // interrupts that should be delivered now.
            syncThreadContext();
            if (interrupts->checkInterrupts(tc)) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");

                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

    return kvmRunWrapper(ticks);
}
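// Note on the interrupt-window protocol above: setting
// request_interrupt_window asks KVM to exit to user space (with exit
// reason KVM_EXIT_IRQ_WINDOW_OPEN) as soon as the guest can accept an
// interrupt, giving kvmRun() another chance to deliver the pending
// gem5 interrupt. See handleKvmExitIRQWindowOpen() below.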
Tick
X86KvmCPU::kvmRunDrain()
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        // Tell KVM to find a suitable place to deliver interrupts. This
        // should ensure that pending interrupts have been delivered and
        // things are reasonably consistent (i.e., no interrupts pending
        // in the guest).
        kvm_run.request_interrupt_window = 1;

        // Limit the run to 1 millisecond. That is hopefully enough to
        // reach an interrupt window. Otherwise, we'll just try again
        // later.
        return kvmRunWrapper(1 * SimClock::Float::ms);
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

        return kvmRunWrapper(0);
    }
}
Tick
X86KvmCPU::kvmRunWrapper(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    // Synchronize the APIC base and CR8 here since they are present
    // in the kvm_run struct, which makes the synchronization really
    // cheap.
    kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));

    tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    return run_ticks;
}
uint64_t
X86KvmCPU::getHostCycles() const
{
    return getMSR(MSR_TSC);
}
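// getHostCycles() reads the TSC through KVM's MSR interface; the base
// KVM CPU uses this value to estimate how many cycles the guest has
// executed.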
void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}
Tick
X86KvmCPU::handleKvmExitIO()
{
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        Addr pciConfigAddr(tc->readMiscRegNoEffect(
                               MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    io_req.setPhys(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
                   dataMasterId());

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    for (int i = 0; i < count; ++i) {
        Packet pkt(&io_req, cmd);

        pkt.dataStatic(guestData);
        delay += dataPort.sendAtomic(&pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}
Tick
X86KvmCPU::handleKvmExitIRQWindowOpen()
{
    // We don't need to do anything here since this is caught the next
    // time we execute kvmRun(). We still overload the exit event to
    // silence the warning about an unhandled exit event.
    return 0;
}
bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this more gracefully by re-inserting
    // interrupts that are pending into gem5 on a drain. However, that
    // would probably be tricky to do reliably, so we'll just prevent
    // a drain if there is anything pending in the
    // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
    // executed in the guest by requesting an interrupt window if
    // there are pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}
static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index,
             CpuidResult &result)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = 0;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}
void
X86KvmCPU::updateCPUID()
{
    Kvm::CPUIDVector m5_supported;

    /* TODO: We currently don't support any of the functions that
     * iterate through data structures in the CPU using an index. It's
     * currently not a problem since M5 doesn't expose any of them at
     * the moment.
     */

    /* Basic features */
    CpuidResult func0;
    X86ISA::doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    /* Extended features */
    CpuidResult efunc0;
    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    setCPUID(m5_supported);
}
void
X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
{
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);
}
void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
            cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}
void
X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
{
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);
}
void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}
void
X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
{
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
}
void
X86KvmCPU::setMSR(uint32_t index, uint64_t value)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = value;

    setMSRs(*kvm_msrs.get());
}
uint64_t
X86KvmCPU::getMSR(uint32_t index) const
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = 0;

    getMSRs(*kvm_msrs.get());

    return entry.data;
}
const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm.getSupportedMSRs());

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}
void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}
void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}
void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSave state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSave state\n");
}
void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest vCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest vCPU events\n");
}
X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}