/*
 * Copyright (c) 2013 Andreas Sandberg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */
31 #include <linux/kvm.h>
37 #include "arch/x86/regs/msr.hh"
38 #include "arch/x86/cpuid.hh"
39 #include "arch/x86/utility.hh"
40 #include "arch/registers.hh"
41 #include "cpu/kvm/base.hh"
42 #include "cpu/kvm/x86_cpu.hh"
43 #include "debug/Drain.hh"
44 #include "debug/Kvm.hh"
45 #include "debug/KvmContext.hh"
46 #include "debug/KvmIO.hh"
47 #include "debug/KvmInt.hh"
49 using namespace X86ISA
;
53 #define IO_PCI_CONF_ADDR 0xCF8
54 #define IO_PCI_CONF_DATA_BASE 0xCFC
56 // Task segment type of an inactive 32-bit or 64-bit task
57 #define SEG_SYS_TYPE_TSS_AVAILABLE 9
58 // Task segment type of an active 32-bit or 64-bit task
59 #define SEG_SYS_TYPE_TSS_BUSY 11
61 // Non-conforming accessed code segment
62 #define SEG_CS_TYPE_ACCESSED 9
63 // Non-conforming accessed code segment that can be read
64 #define SEG_CS_TYPE_READ_ACCESSED 11
66 // The lowest bit of the type field for normal segments (code and
67 // data) is used to indicate that a segment has been accessed.
68 #define SEG_TYPE_BIT_ACCESSED 1
98 uint64_t reserved
[12];
101 static_assert(sizeof(FXSave
) == 512, "Unexpected size of FXSave");
103 #define FOREACH_IREG() \
105 APPLY_IREG(rax, INTREG_RAX); \
106 APPLY_IREG(rbx, INTREG_RBX); \
107 APPLY_IREG(rcx, INTREG_RCX); \
108 APPLY_IREG(rdx, INTREG_RDX); \
109 APPLY_IREG(rsi, INTREG_RSI); \
110 APPLY_IREG(rdi, INTREG_RDI); \
111 APPLY_IREG(rsp, INTREG_RSP); \
112 APPLY_IREG(rbp, INTREG_RBP); \
113 APPLY_IREG(r8, INTREG_R8); \
114 APPLY_IREG(r9, INTREG_R9); \
115 APPLY_IREG(r10, INTREG_R10); \
116 APPLY_IREG(r11, INTREG_R11); \
117 APPLY_IREG(r12, INTREG_R12); \
118 APPLY_IREG(r13, INTREG_R13); \
119 APPLY_IREG(r14, INTREG_R14); \
120 APPLY_IREG(r15, INTREG_R15); \
123 #define FOREACH_SREG() \
125 APPLY_SREG(cr0, MISCREG_CR0); \
126 APPLY_SREG(cr2, MISCREG_CR2); \
127 APPLY_SREG(cr3, MISCREG_CR3); \
128 APPLY_SREG(cr4, MISCREG_CR4); \
129 APPLY_SREG(cr8, MISCREG_CR8); \
130 APPLY_SREG(efer, MISCREG_EFER); \
131 APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
134 #define FOREACH_DREG() \
136 APPLY_DREG(db[0], MISCREG_DR0); \
137 APPLY_DREG(db[1], MISCREG_DR1); \
138 APPLY_DREG(db[2], MISCREG_DR2); \
139 APPLY_DREG(db[3], MISCREG_DR3); \
140 APPLY_DREG(dr6, MISCREG_DR6); \
141 APPLY_DREG(dr7, MISCREG_DR7); \
144 #define FOREACH_SEGMENT() \
146 APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE); \
147 APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE); \
148 APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE); \
149 APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE); \
150 APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE); \
151 APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
152 APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
153 APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
156 #define FOREACH_DTABLE() \
158 APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
159 APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
/**
 * Allocate a KVM "variable size" structure: a fixed header (STRUCT)
 * immediately followed by 'entries' trailing ENTRY elements, as used by
 * KVM ioctl payloads such as kvm_msrs / kvm_msr_entry.
 *
 * @param entries Number of trailing ENTRY elements to allocate space for.
 * @return Pointer to uninitialized storage of the combined size.
 *
 * NOTE(review): the storage comes from raw operator new, but callers wrap
 * the result in std::unique_ptr whose default deleter uses delete. That is
 * only benign because these KVM structs are trivially destructible —
 * confirm before using this helper with non-trivial types.
 */
template<typename STRUCT, typename ENTRY>
static STRUCT *
newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}
169 dumpKvm(const struct kvm_regs
®s
)
171 inform("KVM register state:\n");
173 #define APPLY_IREG(kreg, mreg) \
174 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
180 inform("\trip: 0x%llx\n", regs
.rip
);
181 inform("\trflags: 0x%llx\n", regs
.rflags
);
185 dumpKvm(const char *reg_name
, const struct kvm_segment
&seg
)
187 inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
188 "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
190 seg
.base
, seg
.limit
, seg
.selector
, seg
.type
,
191 seg
.present
, seg
.dpl
, seg
.db
, seg
.s
, seg
.l
, seg
.g
, seg
.avl
, seg
.unusable
);
195 dumpKvm(const char *reg_name
, const struct kvm_dtable
&dtable
)
197 inform("\t%s: @0x%llx+%x\n",
198 reg_name
, dtable
.base
, dtable
.limit
);
202 dumpKvm(const struct kvm_sregs
&sregs
)
204 #define APPLY_SREG(kreg, mreg) \
205 inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
206 #define APPLY_SEGMENT(kreg, idx) \
207 dumpKvm(# kreg, sregs.kreg);
208 #define APPLY_DTABLE(kreg, idx) \
209 dumpKvm(# kreg, sregs.kreg);
211 inform("Special registers:\n");
216 inform("Interrupt Bitmap:");
217 for (int i
= 0; i
< KVM_NR_INTERRUPTS
; i
+= 64)
218 inform(" 0x%.8x", sregs
.interrupt_bitmap
[i
/ 64]);
225 #ifdef KVM_GET_DEBUGREGS
227 dumpKvm(const struct kvm_debugregs
®s
)
229 inform("KVM debug state:\n");
231 #define APPLY_DREG(kreg, mreg) \
232 inform("\t" # kreg ": 0x%llx\n", regs.kreg)
238 inform("\tflags: 0x%llx\n", regs
.flags
);
243 dumpFpuSpec(const struct FXSave
&xs
)
245 inform("\tlast_ip: 0x%x\n", xs
.ctrl64
.fpu_ip
);
246 inform("\tlast_dp: 0x%x\n", xs
.ctrl64
.fpu_dp
);
247 inform("\tmxcsr_mask: 0x%x\n", xs
.mxcsr_mask
);
251 dumpFpuSpec(const struct kvm_fpu
&fpu
)
253 inform("\tlast_ip: 0x%x\n", fpu
.last_ip
);
254 inform("\tlast_dp: 0x%x\n", fpu
.last_dp
);
259 dumpFpuCommon(const T
&fpu
)
261 const unsigned top((fpu
.fsw
>> 11) & 0x7);
262 inform("\tfcw: 0x%x\n", fpu
.fcw
);
264 inform("\tfsw: 0x%x (top: %i, "
265 "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
268 (fpu
.fsw
& CC0Bit
) ? "C0" : "",
269 (fpu
.fsw
& CC1Bit
) ? "C1" : "",
270 (fpu
.fsw
& CC2Bit
) ? "C2" : "",
271 (fpu
.fsw
& CC3Bit
) ? "C3" : "",
273 (fpu
.fsw
& IEBit
) ? "I" : "",
274 (fpu
.fsw
& DEBit
) ? "D" : "",
275 (fpu
.fsw
& ZEBit
) ? "Z" : "",
276 (fpu
.fsw
& OEBit
) ? "O" : "",
277 (fpu
.fsw
& UEBit
) ? "U" : "",
278 (fpu
.fsw
& PEBit
) ? "P" : "",
280 (fpu
.fsw
& StackFaultBit
) ? "SF " : "",
281 (fpu
.fsw
& ErrSummaryBit
) ? "ES " : "",
282 (fpu
.fsw
& BusyBit
) ? "BUSY " : ""
284 inform("\tftwx: 0x%x\n", fpu
.ftwx
);
285 inform("\tlast_opcode: 0x%x\n", fpu
.last_opcode
);
287 inform("\tmxcsr: 0x%x\n", fpu
.mxcsr
);
288 inform("\tFP Stack:\n");
289 for (int i
= 0; i
< 8; ++i
) {
290 const unsigned reg_idx((i
+ top
) & 0x7);
291 const bool empty(!((fpu
.ftwx
>> reg_idx
) & 0x1));
292 const double value(X86ISA::loadFloat80(fpu
.fpr
[i
]));
294 for (int j
= 0; j
< 10; ++j
)
295 snprintf(&hex
[j
*2], 3, "%.2x", fpu
.fpr
[i
][j
]);
296 inform("\t\tST%i/%i: 0x%s (%f)%s\n", i
, reg_idx
,
297 hex
, value
, empty
? " (e)" : "");
299 inform("\tXMM registers:\n");
300 for (int i
= 0; i
< 16; ++i
) {
302 for (int j
= 0; j
< 16; ++j
)
303 snprintf(&hex
[j
*2], 3, "%.2x", fpu
.xmm
[i
][j
]);
304 inform("\t\t%i: 0x%s\n", i
, hex
);
309 dumpKvm(const struct kvm_fpu
&fpu
)
311 inform("FPU registers:\n");
316 dumpKvm(const struct kvm_xsave
&xsave
)
318 inform("FPU registers (XSave):\n");
319 dumpFpuCommon(*(FXSave
*)xsave
.region
);
323 dumpKvm(const struct kvm_msrs
&msrs
)
327 for (int i
= 0; i
< msrs
.nmsrs
; ++i
) {
328 const struct kvm_msr_entry
&e(msrs
.entries
[i
]);
330 inform("\t0x%x: 0x%x\n", e
.index
, e
.data
);
335 dumpKvm(const struct kvm_xcrs
®s
)
337 inform("KVM XCR registers:\n");
339 inform("\tFlags: 0x%x\n", regs
.flags
);
340 for (int i
= 0; i
< regs
.nr_xcrs
; ++i
) {
341 inform("\tXCR[0x%x]: 0x%x\n",
348 dumpKvm(const struct kvm_vcpu_events
&events
)
350 inform("vCPU events:\n");
352 inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
353 events
.exception
.injected
, events
.exception
.nr
,
354 events
.exception
.has_error_code
, events
.exception
.error_code
);
356 inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
357 events
.interrupt
.injected
, events
.interrupt
.nr
,
358 events
.interrupt
.soft
);
360 inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
361 events
.nmi
.injected
, events
.nmi
.pending
,
364 inform("\tSIPI vector: 0x%x\n", events
.sipi_vector
);
365 inform("\tFlags: 0x%x\n", events
.flags
);
/**
 * Check whether a 64-bit virtual address is canonical.
 *
 * x86-64 doesn't currently use the full 64-bit virtual address
 * space, instead it uses signed 48 bit addresses that are
 * sign-extended to 64 bits. Such addresses are known as "canonical":
 * bits 63..47 must either all be zero or all be one.
 *
 * @param addr Virtual address to check.
 * @return true if the address is canonical, false otherwise.
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    // Mask keeps bits 63..47; canonical addresses have them all equal.
    const uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
380 checkSeg(const char *name
, const int idx
, const struct kvm_segment
&seg
,
381 struct kvm_sregs sregs
)
383 // Check the register base
389 if (!isCanonicalAddress(seg
.base
))
390 warn("Illegal %s base: 0x%x\n", name
, seg
.base
);
399 if (seg
.base
& 0xffffffff00000000ULL
)
400 warn("Illegal %s base: 0x%x\n", name
, seg
.base
);
410 warn("CS type is 3 but dpl != 0.\n");
414 if (seg
.dpl
!= sregs
.ss
.dpl
)
415 warn("CS type is %i but CS DPL != SS DPL\n", seg
.type
);
419 if (seg
.dpl
> sregs
.ss
.dpl
)
420 warn("CS type is %i but CS DPL > SS DPL\n", seg
.type
);
423 warn("Illegal CS type: %i\n", seg
.type
);
433 if (sregs
.cs
.type
== 3 && seg
.dpl
!= 0)
434 warn("CS type is 3, but SS DPL is != 0.\n");
437 if (!(sregs
.cr0
& 1) && seg
.dpl
!= 0)
438 warn("SS DPL is %i, but CR0 PE is 0\n", seg
.dpl
);
441 warn("Illegal SS type: %i\n", seg
.type
);
452 if (!(seg
.type
& 0x1) ||
453 ((seg
.type
& 0x8) && !(seg
.type
& 0x2)))
454 warn("%s has an illegal type field: %i\n", name
, seg
.type
);
458 // TODO: We should check the CPU mode
459 if (seg
.type
!= 3 && seg
.type
!= 11)
460 warn("%s: Illegal segment type (%i)\n", name
, seg
.type
);
467 warn("%s: Illegal segment type (%i)\n", name
, seg
.type
);
481 warn("%s: S flag not set\n", name
);
489 warn("%s: S flag is set\n", name
);
505 warn("%s: P flag not set\n", name
);
507 if (((seg
.limit
& 0xFFF) == 0 && seg
.g
) ||
508 ((seg
.limit
& 0xFFF00000) != 0 && !seg
.g
)) {
509 warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
510 name
, seg
.limit
, seg
.g
);
518 X86KvmCPU::X86KvmCPU(X86KvmCPUParams
*params
)
519 : BaseKvmCPU(params
),
520 useXSave(params
->useXSave
)
524 if (!kvm
.capSetTSSAddress())
525 panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
526 if (!kvm
.capExtendedCPUID())
527 panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
528 if (!kvm
.capUserNMI())
529 warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
530 if (!kvm
.capVCPUEvents())
531 warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");
533 haveDebugRegs
= kvm
.capDebugRegs();
534 haveXSave
= kvm
.capXSave();
535 haveXCRs
= kvm
.capXCRs();
537 if (useXSave
&& !haveXSave
) {
538 warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
539 "unreliable due to kernel bugs.\n");
541 } else if (!useXSave
) {
542 warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
546 X86KvmCPU::~X86KvmCPU()
553 BaseKvmCPU::startup();
557 // TODO: Do we need to create an identity mapped TSS area? We
558 // should call kvm.vm.setTSSAddress() here in that case. It should
559 // only be needed for old versions of the virtualization
560 // extensions. We should make sure that the identity range is
561 // reserved in the e820 memory map in that case.
565 X86KvmCPU::dump() const
580 X86KvmCPU::dumpFpuRegs() const
588 X86KvmCPU::dumpIntRegs() const
590 struct kvm_regs regs
;
596 X86KvmCPU::dumpSpecRegs() const
598 struct kvm_sregs sregs
;
599 getSpecialRegisters(sregs
);
604 X86KvmCPU::dumpDebugRegs() const
607 #ifdef KVM_GET_DEBUGREGS
608 struct kvm_debugregs dregs
;
609 getDebugRegisters(dregs
);
613 inform("Debug registers not supported by kernel.\n");
618 X86KvmCPU::dumpXCRs() const
621 struct kvm_xcrs xcrs
;
625 inform("XCRs not supported by kernel.\n");
630 X86KvmCPU::dumpXSave() const
633 struct kvm_xsave xsave
;
637 inform("XSave not supported by kernel.\n");
642 X86KvmCPU::dumpVCpuEvents() const
644 struct kvm_vcpu_events events
;
645 getVCpuEvents(events
);
650 X86KvmCPU::dumpMSRs() const
652 const Kvm::MSRIndexVector
&supported_msrs(vm
.kvm
->getSupportedMSRs());
653 std::unique_ptr
<struct kvm_msrs
> msrs(
654 newVarStruct
<struct kvm_msrs
, struct kvm_msr_entry
>(
655 supported_msrs
.size()));
657 msrs
->nmsrs
= supported_msrs
.size();
658 for (int i
= 0; i
< supported_msrs
.size(); ++i
) {
659 struct kvm_msr_entry
&e(msrs
->entries
[i
]);
660 e
.index
= supported_msrs
[i
];
664 getMSRs(*msrs
.get());
666 dumpKvm(*msrs
.get());
670 X86KvmCPU::updateKvmState()
672 updateKvmStateRegs();
673 updateKvmStateSRegs();
675 updateKvmStateMSRs();
677 DPRINTF(KvmContext
, "X86KvmCPU::updateKvmState():\n");
678 if (DTRACE(KvmContext
))
683 X86KvmCPU::updateKvmStateRegs()
685 struct kvm_regs regs
;
687 #define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
691 regs
.rip
= tc
->instAddr() - tc
->readMiscReg(MISCREG_CS_BASE
);
693 /* You might think that setting regs.rflags to the contents
694 * MISCREG_RFLAGS here would suffice. In that case you're
695 * mistaken. We need to reconstruct it from a bunch of ucode
696 * registers and wave a dead chicken over it (aka mask out and set
697 * reserved bits) to get it to work.
699 regs
.rflags
= X86ISA::getRFlags(tc
);
705 setKvmSegmentReg(ThreadContext
*tc
, struct kvm_segment
&kvm_seg
,
708 SegAttr
attr(tc
->readMiscRegNoEffect(MISCREG_SEG_ATTR(index
)));
710 kvm_seg
.base
= tc
->readMiscRegNoEffect(MISCREG_SEG_BASE(index
));
711 kvm_seg
.limit
= tc
->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index
));
712 kvm_seg
.selector
= tc
->readMiscRegNoEffect(MISCREG_SEG_SEL(index
));
713 kvm_seg
.type
= attr
.type
;
714 kvm_seg
.present
= attr
.present
;
715 kvm_seg
.dpl
= attr
.dpl
;
716 kvm_seg
.db
= attr
.defaultSize
;
717 kvm_seg
.s
= attr
.system
;
718 kvm_seg
.l
= attr
.longMode
;
719 kvm_seg
.g
= attr
.granularity
;
720 kvm_seg
.avl
= attr
.avl
;
722 // A segment is normally unusable when the selector is zero. There
723 // is a attr.unusable flag in gem5, but it seems unused. qemu
724 // seems to set this to 0 all the time, so we just do the same and
725 // hope for the best.
726 kvm_seg
.unusable
= 0;
730 setKvmDTableReg(ThreadContext
*tc
, struct kvm_dtable
&kvm_dtable
,
733 kvm_dtable
.base
= tc
->readMiscRegNoEffect(MISCREG_SEG_BASE(index
));
734 kvm_dtable
.limit
= tc
->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index
));
738 forceSegAccessed(struct kvm_segment
&seg
)
740 // Intel's VMX requires that (some) usable segments are flagged as
741 // 'accessed' (i.e., the lowest bit in the segment type is set)
742 // when entering VMX. This wouldn't necessary be the case even if
743 // gem5 did set the access bits correctly, so we force it to one
746 seg
.type
|= SEG_TYPE_BIT_ACCESSED
;
750 X86KvmCPU::updateKvmStateSRegs()
752 struct kvm_sregs sregs
;
754 #define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
755 #define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
756 #define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)
766 // Clear the interrupt bitmap
767 memset(&sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
769 // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
770 // bit in the type field set.
771 forceSegAccessed(sregs
.cs
);
772 forceSegAccessed(sregs
.ss
);
773 forceSegAccessed(sregs
.ds
);
774 forceSegAccessed(sregs
.es
);
775 forceSegAccessed(sregs
.fs
);
776 forceSegAccessed(sregs
.gs
);
778 // There are currently some cases where the active task isn't
779 // marked as busy. This is illegal in VMX, so we force it to busy.
780 if (sregs
.tr
.type
== SEG_SYS_TYPE_TSS_AVAILABLE
) {
781 hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
783 sregs
.tr
.type
= SEG_SYS_TYPE_TSS_BUSY
;
786 // VMX requires the DPL of SS and CS to be the same for
787 // non-conforming code segments. It seems like m5 doesn't set the
788 // DPL of SS correctly when taking interrupts, so we need to fix
790 if ((sregs
.cs
.type
== SEG_CS_TYPE_ACCESSED
||
791 sregs
.cs
.type
== SEG_CS_TYPE_READ_ACCESSED
) &&
792 sregs
.cs
.dpl
!= sregs
.ss
.dpl
) {
794 hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
795 sregs
.cs
.dpl
, sregs
.ss
.dpl
, sregs
.cs
.dpl
);
796 sregs
.ss
.dpl
= sregs
.cs
.dpl
;
799 // Do checks after fixing up the state to avoid getting excessive
800 // amounts of warnings.
801 RFLAGS
rflags_nocc(tc
->readMiscReg(MISCREG_RFLAGS
));
802 if (!rflags_nocc
.vm
) {
803 // Do segment verification if the CPU isn't entering virtual
804 // 8086 mode. We currently assume that unrestricted guest
805 // mode is available.
807 #define APPLY_SEGMENT(kreg, idx) \
808 checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)
814 setSpecialRegisters(sregs
);
817 template <typename T
>
819 updateKvmStateFPUCommon(ThreadContext
*tc
, T
&fpu
)
821 static_assert(sizeof(X86ISA::FloatRegBits
) == 8,
822 "Unexpected size of X86ISA::FloatRegBits");
824 fpu
.mxcsr
= tc
->readMiscRegNoEffect(MISCREG_MXCSR
);
825 fpu
.fcw
= tc
->readMiscRegNoEffect(MISCREG_FCW
);
826 // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
828 fpu
.fsw
= tc
->readMiscReg(MISCREG_FSW
);
830 uint64_t ftw(tc
->readMiscRegNoEffect(MISCREG_FTW
));
831 fpu
.ftwx
= X86ISA::convX87TagsToXTags(ftw
);
833 fpu
.last_opcode
= tc
->readMiscRegNoEffect(MISCREG_FOP
);
835 const unsigned top((fpu
.fsw
>> 11) & 0x7);
836 for (int i
= 0; i
< 8; ++i
) {
837 const unsigned reg_idx((i
+ top
) & 0x7);
838 const double value(tc
->readFloatReg(FLOATREG_FPR(reg_idx
)));
839 DPRINTF(KvmContext
, "Setting KVM FP reg %i (st[%i]) := %f\n",
841 X86ISA::storeFloat80(fpu
.fpr
[i
], value
);
844 // TODO: We should update the MMX state
846 for (int i
= 0; i
< 16; ++i
) {
847 *(X86ISA::FloatRegBits
*)&fpu
.xmm
[i
][0] =
848 tc
->readFloatRegBits(FLOATREG_XMM_LOW(i
));
849 *(X86ISA::FloatRegBits
*)&fpu
.xmm
[i
][8] =
850 tc
->readFloatRegBits(FLOATREG_XMM_HIGH(i
));
855 X86KvmCPU::updateKvmStateFPULegacy()
859 // There is some padding in the FP registers, so we'd better zero
861 memset(&fpu
, 0, sizeof(fpu
));
863 updateKvmStateFPUCommon(tc
, fpu
);
865 if (tc
->readMiscRegNoEffect(MISCREG_FISEG
))
866 warn_once("MISCREG_FISEG is non-zero.\n");
868 fpu
.last_ip
= tc
->readMiscRegNoEffect(MISCREG_FIOFF
);
870 if (tc
->readMiscRegNoEffect(MISCREG_FOSEG
))
871 warn_once("MISCREG_FOSEG is non-zero.\n");
873 fpu
.last_dp
= tc
->readMiscRegNoEffect(MISCREG_FOOFF
);
879 X86KvmCPU::updateKvmStateFPUXSave()
881 struct kvm_xsave kxsave
;
882 FXSave
&xsave(*(FXSave
*)kxsave
.region
);
884 // There is some padding and reserved fields in the structure, so
885 // we'd better zero the whole thing.
886 memset(&kxsave
, 0, sizeof(kxsave
));
888 updateKvmStateFPUCommon(tc
, xsave
);
890 if (tc
->readMiscRegNoEffect(MISCREG_FISEG
))
891 warn_once("MISCREG_FISEG is non-zero.\n");
893 xsave
.ctrl64
.fpu_ip
= tc
->readMiscRegNoEffect(MISCREG_FIOFF
);
895 if (tc
->readMiscRegNoEffect(MISCREG_FOSEG
))
896 warn_once("MISCREG_FOSEG is non-zero.\n");
898 xsave
.ctrl64
.fpu_dp
= tc
->readMiscRegNoEffect(MISCREG_FOOFF
);
904 X86KvmCPU::updateKvmStateFPU()
907 updateKvmStateFPUXSave();
909 updateKvmStateFPULegacy();
913 X86KvmCPU::updateKvmStateMSRs()
917 const Kvm::MSRIndexVector
&indices(getMsrIntersection());
919 for (auto it
= indices
.cbegin(); it
!= indices
.cend(); ++it
) {
920 struct kvm_msr_entry e
;
924 e
.data
= tc
->readMiscReg(msrMap
.at(*it
));
925 DPRINTF(KvmContext
, "Adding MSR: idx: 0x%x, data: 0x%x\n",
935 X86KvmCPU::updateThreadContext()
937 struct kvm_regs regs
;
938 struct kvm_sregs sregs
;
941 getSpecialRegisters(sregs
);
943 DPRINTF(KvmContext
, "X86KvmCPU::updateThreadContext():\n");
944 if (DTRACE(KvmContext
))
947 updateThreadContextRegs(regs
, sregs
);
948 updateThreadContextSRegs(sregs
);
950 struct kvm_xsave xsave
;
953 updateThreadContextXSave(xsave
);
958 updateThreadContextFPU(fpu
);
960 updateThreadContextMSRs();
962 // The M5 misc reg caches some values from other
963 // registers. Writing to it with side effects causes it to be
964 // updated from its source registers.
965 tc
->setMiscReg(MISCREG_M5_REG
, 0);
969 X86KvmCPU::updateThreadContextRegs(const struct kvm_regs
®s
,
970 const struct kvm_sregs
&sregs
)
972 #define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)
978 tc
->pcState(PCState(regs
.rip
+ sregs
.cs
.base
));
980 // Flags are spread out across multiple semi-magic registers so we
981 // need some special care when updating them.
982 X86ISA::setRFlags(tc
, regs
.rflags
);
987 setContextSegment(ThreadContext
*tc
, const struct kvm_segment
&kvm_seg
,
992 attr
.type
= kvm_seg
.type
;
993 attr
.present
= kvm_seg
.present
;
994 attr
.dpl
= kvm_seg
.dpl
;
995 attr
.defaultSize
= kvm_seg
.db
;
996 attr
.system
= kvm_seg
.s
;
997 attr
.longMode
= kvm_seg
.l
;
998 attr
.granularity
= kvm_seg
.g
;
999 attr
.avl
= kvm_seg
.avl
;
1000 attr
.unusable
= kvm_seg
.unusable
;
1002 // We need some setMiscReg magic here to keep the effective base
1003 // addresses in sync. We need an up-to-date version of EFER, so
1004 // make sure this is called after the sregs have been synced.
1005 tc
->setMiscReg(MISCREG_SEG_BASE(index
), kvm_seg
.base
);
1006 tc
->setMiscReg(MISCREG_SEG_LIMIT(index
), kvm_seg
.limit
);
1007 tc
->setMiscReg(MISCREG_SEG_SEL(index
), kvm_seg
.selector
);
1008 tc
->setMiscReg(MISCREG_SEG_ATTR(index
), attr
);
1012 setContextSegment(ThreadContext
*tc
, const struct kvm_dtable
&kvm_dtable
,
1015 // We need some setMiscReg magic here to keep the effective base
1016 // addresses in sync. We need an up-to-date version of EFER, so
1017 // make sure this is called after the sregs have been synced.
1018 tc
->setMiscReg(MISCREG_SEG_BASE(index
), kvm_dtable
.base
);
1019 tc
->setMiscReg(MISCREG_SEG_LIMIT(index
), kvm_dtable
.limit
);
1023 X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs
&sregs
)
1025 assert(getKvmRunState()->apic_base
== sregs
.apic_base
);
1026 assert(getKvmRunState()->cr8
== sregs
.cr8
);
1028 #define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
1029 #define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1030 #define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
1035 #undef APPLY_SEGMENT
1039 template<typename T
>
1041 updateThreadContextFPUCommon(ThreadContext
*tc
, const T
&fpu
)
1043 const unsigned top((fpu
.fsw
>> 11) & 0x7);
1045 static_assert(sizeof(X86ISA::FloatRegBits
) == 8,
1046 "Unexpected size of X86ISA::FloatRegBits");
1048 for (int i
= 0; i
< 8; ++i
) {
1049 const unsigned reg_idx((i
+ top
) & 0x7);
1050 const double value(X86ISA::loadFloat80(fpu
.fpr
[i
]));
1051 DPRINTF(KvmContext
, "Setting gem5 FP reg %i (st[%i]) := %f\n",
1053 tc
->setFloatReg(FLOATREG_FPR(reg_idx
), value
);
1056 // TODO: We should update the MMX state
1058 tc
->setMiscRegNoEffect(MISCREG_X87_TOP
, top
);
1059 tc
->setMiscRegNoEffect(MISCREG_MXCSR
, fpu
.mxcsr
);
1060 tc
->setMiscRegNoEffect(MISCREG_FCW
, fpu
.fcw
);
1061 tc
->setMiscRegNoEffect(MISCREG_FSW
, fpu
.fsw
);
1063 uint64_t ftw(convX87XTagsToTags(fpu
.ftwx
));
1064 // TODO: Are these registers really the same?
1065 tc
->setMiscRegNoEffect(MISCREG_FTW
, ftw
);
1066 tc
->setMiscRegNoEffect(MISCREG_FTAG
, ftw
);
1068 tc
->setMiscRegNoEffect(MISCREG_FOP
, fpu
.last_opcode
);
1070 for (int i
= 0; i
< 16; ++i
) {
1071 tc
->setFloatRegBits(FLOATREG_XMM_LOW(i
),
1072 *(X86ISA::FloatRegBits
*)&fpu
.xmm
[i
][0]);
1073 tc
->setFloatRegBits(FLOATREG_XMM_HIGH(i
),
1074 *(X86ISA::FloatRegBits
*)&fpu
.xmm
[i
][8]);
1079 X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu
&fpu
)
1081 updateThreadContextFPUCommon(tc
, fpu
);
1083 tc
->setMiscRegNoEffect(MISCREG_FISEG
, 0);
1084 tc
->setMiscRegNoEffect(MISCREG_FIOFF
, fpu
.last_ip
);
1085 tc
->setMiscRegNoEffect(MISCREG_FOSEG
, 0);
1086 tc
->setMiscRegNoEffect(MISCREG_FOOFF
, fpu
.last_dp
);
1090 X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave
&kxsave
)
1092 const FXSave
&xsave(*(const FXSave
*)kxsave
.region
);
1094 updateThreadContextFPUCommon(tc
, xsave
);
1096 tc
->setMiscRegNoEffect(MISCREG_FISEG
, 0);
1097 tc
->setMiscRegNoEffect(MISCREG_FIOFF
, xsave
.ctrl64
.fpu_ip
);
1098 tc
->setMiscRegNoEffect(MISCREG_FOSEG
, 0);
1099 tc
->setMiscRegNoEffect(MISCREG_FOOFF
, xsave
.ctrl64
.fpu_dp
);
1103 X86KvmCPU::updateThreadContextMSRs()
1105 const Kvm::MSRIndexVector
&msrs(getMsrIntersection());
1107 std::unique_ptr
<struct kvm_msrs
> kvm_msrs(
1108 newVarStruct
<struct kvm_msrs
, struct kvm_msr_entry
>(msrs
.size()));
1109 struct kvm_msr_entry
*entry
;
1111 // Create a list of MSRs to read
1112 kvm_msrs
->nmsrs
= msrs
.size();
1113 entry
= &kvm_msrs
->entries
[0];
1114 for (auto it
= msrs
.cbegin(); it
!= msrs
.cend(); ++it
, ++entry
) {
1116 entry
->reserved
= 0;
1120 getMSRs(*kvm_msrs
.get());
1122 // Update M5's state
1123 entry
= &kvm_msrs
->entries
[0];
1124 for (int i
= 0; i
< kvm_msrs
->nmsrs
; ++i
, ++entry
) {
1125 DPRINTF(KvmContext
, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1126 entry
->index
, entry
->data
);
1128 tc
->setMiscReg(X86ISA::msrMap
.at(entry
->index
), entry
->data
);
1133 X86KvmCPU::deliverInterrupts()
1137 syncThreadContext();
1140 // Migrate to the interrupt controller's thread to get the
1141 // interrupt. Even though the individual methods are safe to
1142 // call across threads, we might still lose interrupts unless
1143 // they are getInterrupt() and updateIntrInfo() are called
1145 EventQueue::ScopedMigration
migrate(interrupts
[0]->eventQueue());
1146 fault
= interrupts
[0]->getInterrupt(tc
);
1147 interrupts
[0]->updateIntrInfo(tc
);
1150 X86Interrupt
*x86int(dynamic_cast<X86Interrupt
*>(fault
.get()));
1151 if (dynamic_cast<NonMaskableInterrupt
*>(fault
.get())) {
1152 DPRINTF(KvmInt
, "Delivering NMI\n");
1153 kvmNonMaskableInterrupt();
1154 } else if (dynamic_cast<InitInterrupt
*>(fault
.get())) {
1155 DPRINTF(KvmInt
, "INIT interrupt\n");
1156 fault
.get()->invoke(tc
);
1157 // Delay the kvm state update since we won't enter KVM on this
1159 threadContextDirty
= true;
1160 // HACK: gem5 doesn't actually have any BIOS code, which means
1161 // that we need to halt the thread and wait for a startup
1162 // interrupt before restarting the thread. The simulated CPUs
1163 // use the same kind of hack using a microcode routine.
1165 } else if (dynamic_cast<StartupInterrupt
*>(fault
.get())) {
1166 DPRINTF(KvmInt
, "STARTUP interrupt\n");
1167 fault
.get()->invoke(tc
);
1168 // The kvm state is assumed to have been updated when entering
1169 // kvmRun(), so we need to update manually it here.
1171 } else if (x86int
) {
1172 struct kvm_interrupt kvm_int
;
1173 kvm_int
.irq
= x86int
->getVector();
1175 DPRINTF(KvmInt
, "Delivering interrupt: %s (%u)\n",
1176 fault
->name(), kvm_int
.irq
);
1178 kvmInterrupt(kvm_int
);
1180 panic("KVM: Unknown interrupt type\n");
1186 X86KvmCPU::kvmRun(Tick ticks
)
1188 struct kvm_run
&kvm_run(*getKvmRunState());
1190 if (interrupts
[0]->checkInterruptsRaw()) {
1191 if (interrupts
[0]->hasPendingUnmaskable()) {
1193 "Delivering unmaskable interrupt.\n");
1194 syncThreadContext();
1195 deliverInterrupts();
1196 } else if (kvm_run
.ready_for_interrupt_injection
) {
1197 // KVM claims that it is ready for an interrupt. It might
1198 // be lying if we just updated rflags and disabled
1199 // interrupts (e.g., by doing a CPU handover). Let's sync
1200 // the thread context and check if there are /really/
1201 // interrupts that should be delivered now.
1202 syncThreadContext();
1203 if (interrupts
[0]->checkInterrupts(tc
)) {
1205 "M5 has pending interrupts, delivering interrupt.\n");
1207 deliverInterrupts();
1210 "Interrupt delivery delayed due to KVM confusion.\n");
1211 kvm_run
.request_interrupt_window
= 1;
1213 } else if (!kvm_run
.request_interrupt_window
) {
1215 "M5 has pending interrupts, requesting interrupt "
1217 kvm_run
.request_interrupt_window
= 1;
1220 kvm_run
.request_interrupt_window
= 0;
1223 // The CPU might have been suspended as a result of the INIT
1224 // interrupt delivery hack. In that case, don't enter into KVM.
1225 if (_status
== Idle
)
1228 return kvmRunWrapper(ticks
);
1232 X86KvmCPU::kvmRunDrain()
1234 struct kvm_run
&kvm_run(*getKvmRunState());
1236 if (!archIsDrained()) {
1237 DPRINTF(Drain
, "kvmRunDrain: Architecture code isn't drained\n");
1239 // Tell KVM to find a suitable place to deliver interrupts. This
1240 // should ensure that pending interrupts have been delivered and
1241 // things are reasonably consistent (i.e., no interrupts pending
1243 kvm_run
.request_interrupt_window
= 1;
1245 // Limit the run to 1 millisecond. That is hopefully enough to
1246 // reach an interrupt window. Otherwise, we'll just try again
1248 return kvmRunWrapper(1 * SimClock::Float::ms
);
1250 DPRINTF(Drain
, "kvmRunDrain: Delivering pending IO\n");
1252 return kvmRunWrapper(0);
1257 X86KvmCPU::kvmRunWrapper(Tick ticks
)
1259 struct kvm_run
&kvm_run(*getKvmRunState());
1261 // Synchronize the APIC base and CR8 here since they are present
1262 // in the kvm_run struct, which makes the synchronization really
1264 kvm_run
.apic_base
= tc
->readMiscReg(MISCREG_APIC_BASE
);
1265 kvm_run
.cr8
= tc
->readMiscReg(MISCREG_CR8
);
1267 const Tick
run_ticks(BaseKvmCPU::kvmRun(ticks
));
1269 tc
->setMiscReg(MISCREG_APIC_BASE
, kvm_run
.apic_base
);
1270 kvm_run
.cr8
= tc
->readMiscReg(MISCREG_CR8
);
1276 X86KvmCPU::getHostCycles() const
1278 return getMSR(MSR_TSC
);
1282 X86KvmCPU::handleIOMiscReg32(int miscreg
)
1284 struct kvm_run
&kvm_run(*getKvmRunState());
1285 const uint16_t port(kvm_run
.io
.port
);
1287 assert(kvm_run
.exit_reason
== KVM_EXIT_IO
);
1289 if (kvm_run
.io
.size
!= 4) {
1290 panic("Unexpected IO size (%u) for address 0x%x.\n",
1291 kvm_run
.io
.size
, port
);
1294 if (kvm_run
.io
.count
!= 1) {
1295 panic("Unexpected IO count (%u) for address 0x%x.\n",
1296 kvm_run
.io
.count
, port
);
1299 uint32_t *data((uint32_t *)getGuestData(kvm_run
.io
.data_offset
));
1300 if (kvm_run
.io
.direction
== KVM_EXIT_IO_OUT
)
1301 tc
->setMiscReg(miscreg
, *data
);
1303 *data
= tc
->readMiscRegNoEffect(miscreg
);
/**
 * Handle a KVM_EXIT_IO exit by forwarding the port access to gem5's
 * memory system as an uncacheable access, with special handling for
 * the legacy PCI configuration-space ports.
 *
 * @return Simulated time (in ticks) consumed by the device accesses.
 */
Tick
X86KvmCPU::handleKvmExitIO()
{
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    // A string IO instruction (e.g., REP OUTS) produces count > 1
    // back-to-back accesses of io.size bytes each.
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        // Address register: backed by a misc reg, no memory access.
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        // Data register: translate to the PCI config space address
        // selected by the address register (enable bit 31 set).
        Addr pciConfigAddr(tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    // Device accesses must be uncacheable; IO ports are mapped into
    // the physical address space by x86IOAddress() above.
    Request io_req(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
                   dataMasterId());
    io_req.setThreadContext(tc->contextId(), 0);

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    // Temporarily lock and migrate to the event queue of the
    // VM. This queue is assumed to "own" all devices we need to
    // access if running in multi-core mode.
    EventQueue::ScopedMigration migrate(vm.eventQueue());
    for (int i = 0; i < count; ++i) {
        Packet pkt(&io_req, cmd);

        // The packet reads/writes directly into the kvm_run data area.
        pkt.dataStatic(guestData);
        delay += dataPort.sendAtomic(&pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}
/**
 * Handle a KVM_EXIT_IRQ_WINDOW_OPEN exit. Intentionally a no-op.
 *
 * @return Always 0 ticks of additional delay.
 */
Tick
X86KvmCPU::handleKvmExitIRQWindowOpen()
{
    // We don't need to do anything here since this is caught the next
    // time we execute kvmRun(). We still overload the exit event to
    // silence the warning about an unhandled exit event.
    return 0;
}
/**
 * Check if there are any pending events in the vCPU that prevent it
 * from being cleanly drained.
 *
 * @return true if no exception/interrupt/NMI is pending or injected.
 */
bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this by re-inserting interrupts
    // that are pending into gem5 on a drain. However, that would
    // probably be tricky to do reliably, so we'll just prevent a
    // drain if there is anything pending in the
    // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
    // executed in the guest by requesting an interrupt window if
    // there are pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}
1406 static struct kvm_cpuid_entry2
1407 makeKvmCpuid(uint32_t function
, uint32_t index
,
1408 CpuidResult
&result
)
1410 struct kvm_cpuid_entry2 e
;
1411 e
.function
= function
;
1414 e
.eax
= (uint32_t)result
.rax
;
1415 e
.ebx
= (uint32_t)result
.rbx
;
1416 e
.ecx
= (uint32_t)result
.rcx
;
1417 e
.edx
= (uint32_t)result
.rdx
;
/**
 * Enumerate the CPUID functions gem5 emulates and install the
 * resulting table into the KVM vCPU so the guest sees the same CPU
 * features in KVM mode as in simulation.
 */
void
X86KvmCPU::updateCPUID()
{
    Kvm::CPUIDVector m5_supported;

    /* TODO: We currently don't support any of the functions that
     * iterate through data structures in the CPU using an index. It's
     * currently not a problem since M5 doesn't expose any of them at
     * the moment.
     */

    /* Basic features */
    CpuidResult func0;
    // Function 0 returns the highest supported basic function in rax.
    X86ISA::doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    /* Extended features */
    CpuidResult efunc0;
    // Function 0x80000000 returns the highest extended function in rax.
    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    setCPUID(m5_supported);
}
1459 X86KvmCPU::setCPUID(const struct kvm_cpuid2
&cpuid
)
1461 if (ioctl(KVM_SET_CPUID2
, (void *)&cpuid
) == -1)
1462 panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
/**
 * Marshal a vector of CPUID entries into the variable-length
 * kvm_cpuid2 structure expected by the kernel and install it.
 *
 * @param cpuid Vector of CPUID table entries to install.
 */
void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    // kvm_cpuid2 has a flexible array member; allocate header plus
    // one kvm_cpuid_entry2 per vector element.
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}
1479 X86KvmCPU::setMSRs(const struct kvm_msrs
&msrs
)
1481 if (ioctl(KVM_SET_MSRS
, (void *)&msrs
) == -1)
1482 panic("KVM: Failed to set guest MSRs (errno: %i)\n",
/**
 * Marshal a vector of MSR entries into the variable-length kvm_msrs
 * structure expected by the kernel and write them to the vCPU.
 *
 * @param msrs Vector of MSR index/value entries to install.
 */
void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    // kvm_msrs has a flexible array member; allocate header plus one
    // kvm_msr_entry per vector element.
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}
1499 X86KvmCPU::getMSRs(struct kvm_msrs
&msrs
) const
1501 if (ioctl(KVM_GET_MSRS
, (void *)&msrs
) == -1)
1502 panic("KVM: Failed to get guest MSRs (errno: %i)\n",
/**
 * Write a single model-specific register in the vCPU.
 *
 * @param index MSR index to write.
 * @param value New value of the MSR.
 */
void
X86KvmCPU::setMSR(uint32_t index, uint64_t value)
{
    // Build a single-entry kvm_msrs structure around the request.
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = value;

    setMSRs(*kvm_msrs.get());
}
/**
 * Read a single model-specific register from the vCPU.
 *
 * @param index MSR index to read.
 * @return Current value of the MSR.
 */
uint64_t
X86KvmCPU::getMSR(uint32_t index) const
{
    // Build a single-entry kvm_msrs structure; the kernel fills in
    // the data field on return.
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;

    getMSRs(*kvm_msrs.get());

    return entry.data;
}
/**
 * Get the MSRs supported by both KVM and gem5.
 *
 * The intersection is computed lazily on first use and cached for
 * subsequent calls (cachedMsrIntersection is a mutable member).
 *
 * @return Reference to the cached vector of shared MSR indices.
 */
const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        // MSRs the host KVM implementation can save/restore.
        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            // Only keep MSRs that gem5 also models (msrMap).
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}
/**
 * Read the guest's debug registers from KVM.
 *
 * Only available if the host kernel headers provide the
 * KVM_GET_DEBUGREGS ioctl; panics otherwise.
 *
 * @param regs Structure filled in with the guest debug registers.
 */
void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}
/**
 * Write the guest's debug registers to KVM.
 *
 * Only available if the host kernel headers provide the
 * KVM_SET_DEBUGREGS ioctl; panics otherwise.
 *
 * @param regs Debug register values to install in the guest.
 */
void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}
1582 X86KvmCPU::getXCRs(struct kvm_xcrs
®s
) const
1584 if (ioctl(KVM_GET_XCRS
, ®s
) == -1)
1585 panic("KVM: Failed to get guest debug registers\n");
1589 X86KvmCPU::setXCRs(const struct kvm_xcrs
®s
)
1591 if (ioctl(KVM_SET_XCRS
, (void *)®s
) == -1)
1592 panic("KVM: Failed to set guest debug registers\n");
1596 X86KvmCPU::getXSave(struct kvm_xsave
&xsave
) const
1598 if (ioctl(KVM_GET_XSAVE
, &xsave
) == -1)
1599 panic("KVM: Failed to get guest debug registers\n");
1603 X86KvmCPU::setXSave(const struct kvm_xsave
&xsave
)
1605 if (ioctl(KVM_SET_XSAVE
, (void *)&xsave
) == -1)
1606 panic("KVM: Failed to set guest debug registers\n");
1611 X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events
&events
) const
1613 if (ioctl(KVM_GET_VCPU_EVENTS
, &events
) == -1)
1614 panic("KVM: Failed to get guest debug registers\n");
1618 X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events
&events
)
1620 if (ioctl(KVM_SET_VCPU_EVENTS
, (void *)&events
) == -1)
1621 panic("KVM: Failed to set guest debug registers\n");
/**
 * SimObject factory method invoked from the Python configuration
 * layer; the returned CPU is owned by the simulator's object tree.
 */
X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}