--- /dev/null
+# -*- mode:python -*-
+
+# Copyright (c) 2014 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Andreas Sandberg
+
+Import('*')
+
+if env['TARGET_ISA'] == 'null':
+ Return()
+
+SimObject('VirtIO.py')
+
+Source('base.cc')
+Source('pci.cc')
+
+DebugFlag('VIO', 'VirtIO base functionality')
+DebugFlag('VIOPci', 'VirtIO PCI transport')
--- /dev/null
+# -*- mode:python -*-
+
+# Copyright (c) 2014 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Andreas Sandberg
+
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+from Device import PioDevice
+from Pci import PciDevice
+
+
+class VirtIODeviceBase(SimObject):
+ type = 'VirtIODeviceBase'
+ cxx_header = 'dev/virtio/base.hh'
+ abstract = True
+
+ subsystem = Param.UInt8(0x00, "VirtIO subsystem ID")
+
+ system = Param.System(Parent.any, "system object")
+
+class PciVirtIO(PciDevice):
+ type = 'PciVirtIO'
+ cxx_header = 'dev/virtio/pci.hh'
+
+ vio = Param.VirtIODeviceBase("VirtIO device")
+
+ VendorID = 0x1AF4
+ SubsystemVendorID = VendorID
+ DeviceID = 0x1000
+
+ ClassCode = 0xff # Misc device
+
+ BAR0 = 0x00000000 # Anywhere in 32-bit space
+ BAR0Size = '0B' # Overridden by the device model
+
+ InterruptPin = 0x01 # Use #INTA
--- /dev/null
+/*
+ * Copyright (c) 2014 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#include "debug/VIO.hh"
+#include "dev/virtio/base.hh"
+#include "params/VirtIODeviceBase.hh"
+
+VirtDescriptor::VirtDescriptor(PortProxy &_memProxy, VirtQueue &_queue,
+ Index descIndex)
+ : memProxy(&_memProxy), queue(&_queue), _index(descIndex)
+{
+}
+
+VirtDescriptor::VirtDescriptor(VirtDescriptor &&other) noexcept
+{
+ *this = std::forward<VirtDescriptor>(other);
+}
+
+VirtDescriptor::~VirtDescriptor() noexcept
+{
+}
+
+VirtDescriptor &
+VirtDescriptor::operator=(VirtDescriptor &&rhs) noexcept
+{
+ memProxy = std::move(rhs.memProxy);
+ queue = std::move(rhs.queue);
+ _index = std::move(rhs._index);
+ desc = std::move(rhs.desc);
+
+ return *this;
+}
+
+void
+VirtDescriptor::update()
+{
+ const Addr vq_addr(queue->getAddress());
+ // Check if the queue has been initialized yet
+ if (vq_addr == 0)
+ return;
+
+ assert(_index < queue->getSize());
+ const Addr desc_addr(vq_addr + sizeof(desc) * _index);
+ vring_desc guest_desc;
+ memProxy->readBlob(desc_addr, (uint8_t *)&guest_desc, sizeof(guest_desc));
+ desc = vtoh_legacy(guest_desc);
+ DPRINTF(VIO,
+ "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
+ "Next: 0x%x\n",
+ _index, desc.addr, desc.len, desc.flags, desc.next);
+}
+
+void
+VirtDescriptor::updateChain()
+{
+ VirtDescriptor *desc(this);
+ do {
+ desc->update();
+ } while((desc = desc->next()) != NULL && desc != this);
+
+ if (desc == this)
+ panic("Loop in descriptor chain!\n");
+}
+
+void
+VirtDescriptor::dump() const
+{
+ if (!DTRACE(VIO))
+ return;
+
+ DPRINTF(VIO, "Descriptor[%i]: "
+ "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
+ _index, desc.addr, desc.len, desc.flags, desc.next);
+
+ if (isIncoming()) {
+ uint8_t data[desc.len];
+ read(0, data, desc.len);
+ DDUMP(VIO, data, desc.len);
+ }
+}
+
+void
+VirtDescriptor::dumpChain() const
+{
+ if (!DTRACE(VIO))
+ return;
+
+ const VirtDescriptor *desc(this);
+ do {
+ desc->dump();
+ } while((desc = desc->next()) != NULL);
+}
+
+VirtDescriptor *
+VirtDescriptor::next() const
+{
+ if (hasNext()) {
+ return queue->getDescriptor(desc.next);
+ } else {
+ return NULL;
+ }
+}
+
+void
+VirtDescriptor::read(size_t offset, uint8_t *dst, size_t size) const
+{
+ DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::read: offset: %i, dst: 0x%x, size: %i\n",
+ this, desc.addr, desc.len, offset, (long)dst, size);
+ assert(size <= desc.len - offset);
+ if (!isIncoming())
+ panic("Trying to read from outgoing buffer\n");
+
+ memProxy->readBlob(desc.addr + offset, dst, size);
+}
+
+void
+VirtDescriptor::write(size_t offset, const uint8_t *src, size_t size)
+{
+ DPRINTF(VIO, "VirtDescriptor(%p, 0x%x, %i)::write: offset: %i, src: 0x%x, size: %i\n",
+ this, desc.addr, desc.len, offset, (long)src, size);
+ assert(size <= desc.len - offset);
+ if (!isOutgoing())
+ panic("Trying to write to incoming buffer\n");
+
+ memProxy->writeBlob(desc.addr + offset, const_cast<uint8_t *>(src), size);
+}
+
+void
+VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
+{
+ const VirtDescriptor *desc(this);
+ const size_t full_size(size);
+ do {
+ if (offset < desc->size()) {
+ const size_t chunk_size(std::min(desc->size() - offset, size));
+ desc->read(offset, dst, chunk_size);
+ dst += chunk_size;
+ size -= chunk_size;
+ offset = 0;
+ } else {
+ offset -= desc->size();
+ }
+ } while((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);
+
+ if (size != 0) {
+ panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
+ full_size, chainSize(), offset);
+ }
+}
+
+void
+VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
+{
+ VirtDescriptor *desc(this);
+ const size_t full_size(size);
+ do {
+ if (offset < desc->size()) {
+ const size_t chunk_size(std::min(desc->size() - offset, size));
+ desc->write(offset, src, chunk_size);
+ src += chunk_size;
+ size -= chunk_size;
+ offset = 0;
+ } else {
+ offset -= desc->size();
+ }
+ } while((desc = desc->next()) != NULL && size > 0);
+
+ if (size != 0) {
+ panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
+ full_size, chainSize(), offset);
+ }
+}
+
+size_t
+VirtDescriptor::chainSize() const
+{
+ size_t size(0);
+ const VirtDescriptor *desc(this);
+ do {
+ size += desc->size();
+ } while((desc = desc->next()) != NULL);
+
+ return size;
+}
+
+
+
+VirtQueue::VirtQueue(PortProxy &proxy, uint16_t size)
+ : _size(size), _address(0), memProxy(proxy),
+ avail(proxy, size), used(proxy, size),
+ _last_avail(0)
+{
+ descriptors.reserve(_size);
+ for (int i = 0; i < _size; ++i)
+ descriptors.emplace_back(proxy, *this, i);
+}
+
+void
+VirtQueue::serialize(std::ostream &os)
+{
+ SERIALIZE_SCALAR(_address);
+ SERIALIZE_SCALAR(_last_avail);
+}
+
+void
+VirtQueue::unserialize(Checkpoint *cp, const std::string &section)
+{
+ Addr addr_in;
+
+ paramIn(cp, section, "_address", addr_in);
+ UNSERIALIZE_SCALAR(_last_avail);
+
+ // Use the address setter to ensure that the ring buffer addresses
+ // are updated as well.
+ setAddress(addr_in);
+}
+
+void
+VirtQueue::setAddress(Addr address)
+{
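+ // The legacy ring layout is one contiguous block of guest memory
+ // (see the layout description in virtio_ring.h): the descriptor
+ // table (_size * sizeof(vring_desc) bytes), followed by the
+ // available ring (flags, idx, _size ring entries and the
+ // used_event field), padding up to the next ALIGN_SIZE boundary,
+ // and finally the used ring.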
+ const Addr addr_avail(address + _size * sizeof(struct vring_desc));
+ const Addr addr_avail_end(addr_avail + sizeof(struct vring_avail) +
+ _size * sizeof(uint16_t));
+ const Addr addr_used((addr_avail_end + sizeof(uint16_t) +
+ (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
+ _address = address;
+ avail.setAddress(addr_avail);
+ used.setAddress(addr_used);
+}
+
+VirtDescriptor *
+VirtQueue::consumeDescriptor()
+{
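+ // The guest publishes the head index of each new descriptor chain
+ // in the available ring and advances avail.idx, which is a
+ // free-running counter. _last_avail tracks how far the device has
+ // consumed the ring; when it has caught up with avail.idx there
+ // are no pending chains.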
+ avail.read();
+ DPRINTF(VIO, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
+ _last_avail, avail.header.index,
+ avail.ring[_last_avail % avail.ring.size()]);
+ if (_last_avail == avail.header.index)
+ return NULL;
+
+ VirtDescriptor::Index index(avail.ring[_last_avail % avail.ring.size()]);
+ ++_last_avail;
+
+ VirtDescriptor *d(&descriptors[index]);
+ d->updateChain();
+
+ return d;
+}
+
+void
+VirtQueue::produceDescriptor(VirtDescriptor *desc, uint32_t len)
+{
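+ // Return the chain to the guest by appending the index of its head
+ // descriptor and the number of bytes produced to the used ring and
+ // advancing used.idx. The device model still needs to call
+ // VirtIODeviceBase::kick() to notify the guest.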
+ used.readHeader();
+ DPRINTF(VIO, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
+ desc->index(), len, used.header.index);
+
+ struct vring_used_elem &e(used.ring[used.header.index % used.ring.size()]);
+ e.id = desc->index();
+ e.len = len;
+ used.header.index += 1;
+ used.write();
+}
+
+void
+VirtQueue::dump() const
+{
+ if (!DTRACE(VIO))
+ return;
+
+ for (const VirtDescriptor &d : descriptors)
+ d.dump();
+}
+
+void
+VirtQueue::onNotify()
+{
+ DPRINTF(VIO, "onNotify\n");
+
+ // Consume all pending descriptors from the input queue.
+ VirtDescriptor *d;
+ while((d = consumeDescriptor()) != NULL)
+ onNotifyDescriptor(d);
+}
+
+
+VirtIODeviceBase::VirtIODeviceBase(Params *params, DeviceId id,
+ size_t config_size, FeatureBits features)
+ : SimObject(params),
+ guestFeatures(0),
+ deviceId(id), configSize(config_size), deviceFeatures(features),
+ _deviceStatus(0), _queueSelect(0),
+ transKick(NULL)
+{
+}
+
+
+VirtIODeviceBase::~VirtIODeviceBase()
+{
+}
+
+void
+VirtIODeviceBase::serialize(std::ostream &os)
+{
+ SERIALIZE_SCALAR(guestFeatures);
+ paramOut(os, "_deviceStatus", (uint8_t)_deviceStatus);
+ SERIALIZE_SCALAR(_queueSelect);
+ for (QueueID i = 0; i < _queues.size(); ++i) {
+ nameOut(os, csprintf("%s._queues.%i", name(), i));
+ _queues[i]->serialize(os);
+ }
+}
+
+void
+VirtIODeviceBase::unserialize(Checkpoint *cp, const std::string &section)
+{
+ UNSERIALIZE_SCALAR(guestFeatures);
+ uint8_t status;
+ paramIn(cp, section, "_deviceStatus", status);
+ _deviceStatus = status;
+ UNSERIALIZE_SCALAR(_queueSelect);
+ for (QueueID i = 0; i < _queues.size(); ++i)
+ _queues[i]->unserialize(cp, csprintf("%s._queues.%i", section, i));
+}
+
+void
+VirtIODeviceBase::reset()
+{
+ _queueSelect = 0;
+ guestFeatures = 0;
+ _deviceStatus = 0;
+
+ for (QueueID i = 0; i < _queues.size(); ++i)
+ _queues[i]->setAddress(0);
+}
+
+void
+VirtIODeviceBase::onNotify(QueueID idx)
+{
+ DPRINTF(VIO, "onNotify: idx: %i\n", idx);
+ if (idx >= _queues.size()) {
+ panic("Guest tried to notify queue (%i), but only %i "
+ "queues registered.\n",
+ idx, _queues.size());
+ }
+ _queues[idx]->onNotify();
+}
+
+void
+VirtIODeviceBase::setGuestFeatures(FeatureBits features)
+{
+ DPRINTF(VIO, "Setting guest features: 0x%x\n", features);
+ if (~deviceFeatures & features) {
+ panic("Guest tried to enable unsupported features:\n"
+ "Device features: 0x%x\n"
+ "Requested features: 0x%x\n",
+ deviceFeatures, features);
+ }
+ guestFeatures = features;
+}
+
+
+void
+VirtIODeviceBase::setDeviceStatus(DeviceStatus status)
+{
+ _deviceStatus = status;
+ DPRINTF(VIO, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
+ status.acknowledge, status.driver, status.driver_ok, status.failed);
+ if (status == 0)
+ reset();
+}
+
+void
+VirtIODeviceBase::readConfig(PacketPtr pkt, Addr cfgOffset)
+{
+ panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset);
+}
+
+void
+VirtIODeviceBase::writeConfig(PacketPtr pkt, Addr cfgOffset)
+{
+ panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset);
+}
+
+void
+VirtIODeviceBase::readConfigBlob(PacketPtr pkt, Addr cfgOffset, const uint8_t *cfg)
+{
+ const unsigned size(pkt->getSize());
+ pkt->allocate();
+
+ if (cfgOffset + size > configSize)
+ panic("Config read out of bounds.\n");
+
+ pkt->setData(const_cast<uint8_t *>(cfg) + cfgOffset);
+}
+
+void
+VirtIODeviceBase::writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg)
+{
+ const unsigned size(pkt->getSize());
+ pkt->allocate();
+
+ if (cfgOffset + size > configSize)
+ panic("Config write out of bounds.\n");
+
+ pkt->writeData((uint8_t *)cfg + cfgOffset);
+}
+
+
+const VirtQueue &
+VirtIODeviceBase::getCurrentQueue() const
+{
+ if (_queueSelect >= _queues.size())
+ panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);
+
+ return *_queues[_queueSelect];
+}
+
+VirtQueue &
+VirtIODeviceBase::getCurrentQueue()
+{
+ if (_queueSelect >= _queues.size())
+ panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect);
+
+ return *_queues[_queueSelect];
+}
+
+void
+VirtIODeviceBase::setQueueAddress(uint32_t address)
+{
+ getCurrentQueue().setAddress(address * VirtQueue::ALIGN_SIZE);
+}
+
+uint32_t
+VirtIODeviceBase::getQueueAddress() const
+{
+ Addr address(getCurrentQueue().getAddress());
+ assert(!(address & (VirtQueue::ALIGN_SIZE - 1)));
+ return address >> VirtQueue::ALIGN_BITS;
+}
+
+void
+VirtIODeviceBase::registerQueue(VirtQueue &queue)
+{
+ _queues.push_back(&queue);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#ifndef __DEV_VIRTIO_BASE_HH__
+#define __DEV_VIRTIO_BASE_HH__
+
+#include "arch/isa_traits.hh"
+#include "base/bitunion.hh"
+#include "base/callback.hh"
+#include "dev/virtio/virtio_ring.h"
+#include "mem/port_proxy.hh"
+#include "sim/sim_object.hh"
+
+struct VirtIODeviceBaseParams;
+class VirtQueue;
+
+/** @{
+ * @name VirtIO endian conversion helpers
+ *
+ * VirtIO prior to version 1.0 (legacy versions) normally sends values
+ * to the host in the guest system's native byte order. This is going
+ * to change in version 1.0, which mandates little endian. We currently
+ * only support the legacy version of VirtIO (the new and shiny
+ * standard is still in a draft state and not implemented by the
+ * kernel). Once we support the new standard, we should negotiate the
+ * VirtIO version with the guest and automatically use the right type
+ * of byte swapping.
+ */
+
+/** Convert legacy VirtIO endianness to host endianness. */
+template <typename T> inline T
+vtoh_legacy(T v) {
+ return TheISA::gtoh(v);
+}
+
+/** Convert host endianness to legacy VirtIO endianness. */
+template <typename T> inline T
+htov_legacy(T v) {
+ return TheISA::htog(v);
+}
+
+
+template <> inline vring_used_elem
+vtoh_legacy(vring_used_elem v) {
+ v.id = vtoh_legacy(v.id);
+ v.len = vtoh_legacy(v.len);
+ return v;
+}
+
+template <> inline vring_used_elem
+htov_legacy(vring_used_elem v) {
+ v.id = htov_legacy(v.id);
+ v.len = htov_legacy(v.len);
+ return v;
+}
+
+template <> inline vring_desc
+vtoh_legacy(vring_desc v) {
+ v.addr = vtoh_legacy(v.addr);
+ v.len = vtoh_legacy(v.len);
+ v.flags = vtoh_legacy(v.flags);
+ v.next = vtoh_legacy(v.next);
+ return v;
+}
+
+template <> inline vring_desc
+htov_legacy(vring_desc v) {
+ v.addr = htov_legacy(v.addr);
+ v.len = htov_legacy(v.len);
+ v.flags = htov_legacy(v.flags);
+ v.next = htov_legacy(v.next);
+ return v;
+}
+
+/** @} */
+
+/**
+ * VirtIO descriptor (chain) wrapper
+ *
+ * Communication in VirtIO takes place by sending and receiving chains
+ * of so called descriptors using device queues. The queue is
+ * responsible for sending a descriptor chain from the guest to the
+ * host and later sending it back to the guest. The descriptor chain
+ * itself can be thought of as a linked list of buffers (descriptors)
+ * that are read-only (isIncoming() is true) or write-only
+ * (isOutgoing() is true). A single chain may contain any mix of input
+ * and output buffers.
+ *
+ * The descriptor wrapper is normally <i>only</i> instantiated by the
+ * virtqueue wrapper (VirtQueue) and should never be instantiated in
+ * device models. The VirtQueue also ensures that the descriptor
+ * wrapper is re-populated with new data from the guest by calling
+ * updateChain() whenever a new descriptor chain is passed to the host
+ * (VirtQueue::consumeDescriptor()). The updateChain() method
+ * automatically does some sanity checks on the descriptor chain to
+ * detect loops.
+ */
+class VirtDescriptor
+{
+ public:
+ /** Descriptor index in virtqueue */
+ typedef uint16_t Index;
+
+ /** @{
+ * @name VirtIO Descriptor <-> Queue Interface
+ */
+ /**
+ * Create a descriptor wrapper.
+ *
+ * @param memProxy Proxy to the guest physical memory.
+ * @param queue Queue owning this descriptor.
+ * @param index Index within the queue.
+ */
+ VirtDescriptor(PortProxy &memProxy, VirtQueue &queue, Index index);
+ // WORKAROUND: The noexcept declaration works around a bug where
+ // gcc 4.7 tries to call the wrong constructor when emplacing
+ // something into a vector.
+ VirtDescriptor(VirtDescriptor &&other) noexcept;
+ ~VirtDescriptor() noexcept;
+
+ VirtDescriptor &operator=(VirtDescriptor &&rhs) noexcept;
+
+ /** Get the descriptor's index into the virtqueue. */
+ Index index() const { return _index; }
+
+ /** Populate this descriptor with data from the guest. */
+ void update();
+
+ /** Populate this descriptor chain with data from the guest. */
+ void updateChain();
+ /** @} */
+
+ /** @{
+ * @name Debug interfaces
+ */
+ /**
+ * Dump the contents of a descriptor
+ */
+ void dump() const;
+ /**
+ * Dump the contents of a descriptor chain starting at this
+ * descriptor.
+ */
+ void dumpChain() const;
+ /** @} */
+
+
+ /** @{
+ * @name Device Model Interfaces
+ */
+ /**
+ * Read the contents of a descriptor.
+ *
+ * This method copies the contents of a descriptor into a buffer
+ * within gem5. Devices should typically use chainRead() instead
+ * as it automatically follows the descriptor chain to read the
+ * desired number of bytes.
+ *
+ * @see chainRead
+ *
+ * @param offset Offset into the descriptor.
+ * @param dst Destination buffer.
+ * @param size Amount of data to read (in bytes).
+ */
+ void read(size_t offset, uint8_t *dst, size_t size) const;
+ /**
+ * Write to the contents of a descriptor.
+ *
+ * This method copies the contents of a buffer within gem5 into a
+ * descriptor. Devices should typically use chainWrite() instead
+ * as it automatically follows the descriptor chain to write the
+ * desired number of bytes.
+ *
+ * @see chainWrite
+ *
+ * @param offset Offset into the descriptor.
+ * @param src Source buffer.
+ * @param size Amount of data to write (in bytes).
+ */
+ void write(size_t offset, const uint8_t *src, size_t size);
+ /**
+ * Retrieve the size of this descriptor.
+ *
+ * This method gets the size of a single descriptor. For incoming
+ * data, it corresponds to the amount of data that can be read
+ * from the descriptor. For outgoing data, it corresponds to the
+ * amount of data that can be written to it.
+ *
+ * @see chainSize
+ *
+ * @return Size of descriptor in bytes.
+ */
+ size_t size() const { return desc.len; }
+
+ /**
+ * Is this descriptor chained to another descriptor?
+ *
+ * @return true if there is a next pointer, false otherwise.
+ */
+ bool hasNext() const { return desc.flags & VRING_DESC_F_NEXT; }
+ /**
+ * Get the pointer to the next descriptor in a chain.
+ *
+ * @return Pointer to the next descriptor or NULL if this is the
+ * last element in a chain.
+ */
+ VirtDescriptor *next() const;
+
+ /** Check if this is a read-only descriptor (incoming data). */
+ bool isIncoming() const { return !isOutgoing(); }
+ /** Check if this is a write-only descriptor (outgoing data). */
+ bool isOutgoing() const { return desc.flags & VRING_DESC_F_WRITE; }
+
+
+ /**
+ * Read the contents of a descriptor chain.
+ *
+ * This method reads the specified number of bytes from a
+ * descriptor chain starting at this descriptor plus an offset
+ * in bytes. The method automatically follows the links in the
+ * descriptor chain.
+ *
+ * @param offset Offset into the chain (in bytes).
+ * @param dst Pointer to destination buffer.
+ * @param size Size (in bytes).
+ */
+ void chainRead(size_t offset, uint8_t *dst, size_t size) const;
+ /**
+ * Write to a descriptor chain.
+ *
+ * This method writes the specified number of bytes to a
+ * descriptor chain starting at this descriptor plus an offset
+ * in bytes. The method automatically follows the links in the
+ * descriptor chain.
+ *
+ * @param offset Offset into the chain (in bytes).
+ * @param src Pointer to source buffer.
+ * @param size Size (in bytes).
+ */
+ void chainWrite(size_t offset, const uint8_t *src, size_t size);
+ /**
+ * Retrieve the size of this descriptor chain.
+ *
+ * This method gets the size of a descriptor chain starting at
+ * this descriptor.
+ *
+ * @return Size of descriptor chain in bytes.
+ */
+ size_t chainSize() const;
+ /** @} */
+
+ private:
+ // Remove default constructor
+ VirtDescriptor();
+ // Prevent copying
+ VirtDescriptor(const VirtDescriptor &other);
+
+ /** Pointer to memory proxy */
+ PortProxy *memProxy;
+ /** Pointer to virtqueue owning this descriptor */
+ VirtQueue *queue;
+
+ /** Index in virtqueue */
+ Index _index;
+
+ /** Underlying descriptor */
+ vring_desc desc;
+};
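+
+/*
+ * Illustrative sketch (not part of the interface above): a device
+ * model that has been handed the head of a chain consisting of
+ * incoming (device-readable) buffers can copy the whole chain into a
+ * host-side buffer without walking the descriptors itself:
+ *
+ *     std::vector<uint8_t> buf(desc->chainSize());
+ *     desc->chainRead(0, buf.data(), buf.size());
+ *
+ * chainWrite() works the same way for chains of outgoing
+ * (device-writable) buffers.
+ */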
+
+/**
+ * Base wrapper around a virtqueue.
+ *
+ * VirtIO device models typically need to extend this class to
+ * implement their own device queues.
+ *
+ * @note Queues must be registered with
+ * VirtIODeviceBase::registerQueue() to be active.
+ */
+class VirtQueue
+{
+ public:
+ virtual ~VirtQueue() {};
+
+ /** @{
+ * @name Checkpointing Interface
+ */
+ virtual void serialize(std::ostream &os);
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+ /** @} */
+
+ /** @{
+ * @name Low-level Device Interface
+ */
+ /**
+ * Set the base address of this queue.
+ *
+ * @param address Guest physical base address of the queue.
+ */
+ void setAddress(Addr address);
+ /**
+ * Get the guest physical address of this queue.
+ *
+ * @return Physical address in guest where this queue resides.
+ */
+ Addr getAddress() const { return _address; }
+
+ /**
+ * Get the number of descriptors available in this queue.
+ *
+ * @return Size of queue in descriptors.
+ */
+ uint16_t getSize() const { return _size; }
+
+ /**
+ * Get a pointer to a specific descriptor in the queue.
+ *
+ * @note This interface is normally only used by VirtDescriptor
+ * to follow descriptor chains. Device models typically don't need
+ * to use it.
+ *
+ * @return Pointer to a VirtDescriptor.
+ */
+ VirtDescriptor *getDescriptor(VirtDescriptor::Index index) {
+ return &descriptors[index];
+ }
+ /** @} */
+
+ /** @{
+ * @name Device Model Interfaces
+ */
+ /**
+ * Get an incoming descriptor chain from the queue.
+ *
+ * @return Pointer to descriptor on success, NULL if no pending
+ * descriptors are available.
+ */
+ VirtDescriptor *consumeDescriptor();
+ /**
+ * Send a descriptor chain to the guest.
+ *
+ * This method posts a descriptor chain to the guest after a
+ * device model has finished processing it. The device model
+ * typically needs to call VirtIODeviceBase::kick() to notify the
+ * guest that the queue has been updated.
+ *
+ * @note The desc parameter must refer to the first descriptor in
+ * a chain that has been retrieved using consumeDescriptor().
+ *
+ * @note The len parameter specifies the amount of data produced
+ * by the device model. It seems to be ignored by Linux and it is
+ * not well defined.
+ *
+ * @param desc Start of descriptor chain.
+ * @param len Length of the produced data.
+ */
+ void produceDescriptor(VirtDescriptor *desc, uint32_t len);
+ /** @} */
+
+ /** @{
+ * @name Device Model Callbacks
+ */
+ /**
+ * Notify queue of pending events.
+ *
+ * This method is called by VirtIODeviceBase::onNotify() to notify
+ * the device model of pending data in a virtqueue. The default
+ * implementation of this method iterates over the available
+ * descriptor chains and calls onNotifyDescriptor() for every new
+ * incoming chain.
+ *
+ * Device models should normally overload one of onNotify() and
+ * onNotifyDescriptor().
+ */
+ virtual void onNotify();
+ /**
+ * Notify queue of pending incoming descriptor.
+ *
+ * This method is called by the default implementation of
+ * onNotify() to notify the device model of pending data in a
+ * descriptor chain.
+ *
+ * Device models should normally overload one of onNotify() and
+ * onNotifyDescriptor().
+ */
+ virtual void onNotifyDescriptor(VirtDescriptor *desc) {};
+ /** @} */
+
+ /** @{
+ * @name Debug interfaces
+ */
+ /** Dump the contents of a queue */
+ void dump() const;
+ /** @} */
+
+ /** @{ */
+ /**
+ * Page size used by VirtIO.\ It's hard-coded to 4096 bytes in
+ * the spec for historical reasons.
+ */
+ static const unsigned ALIGN_BITS = 12;
+ static const unsigned ALIGN_SIZE = 1 << ALIGN_BITS;
+ /** @} */
+
+ protected:
+ /**
+ * Instantiate a new virtqueue.
+ *
+ * Instantiate a virtqueue with a fixed size. The size is
+ * specified as the number of descriptors in the queue.
+ *
+ * @param proxy Proxy to the guest physical memory.
+ * @param size Size of the queue in descriptors.
+ */
+ VirtQueue(PortProxy &proxy, uint16_t size);
+
+ private:
+ VirtQueue();
+
+ /** Queue size in terms of number of descriptors */
+ const uint16_t _size;
+ /** Base address of the queue */
+ Addr _address;
+ /** Guest physical memory proxy */
+ PortProxy &memProxy;
+
+ private:
+ /**
+ * VirtIO ring buffer wrapper.
+ *
+ * This class wraps a VirtIO ring buffer. The template parameter T
+ * is used to select the data type for the items in the ring (used
+ * or available descriptors).
+ */
+ template<typename T>
+ class VirtRing
+ {
+ public:
+ typedef uint16_t Flags;
+ typedef uint16_t Index;
+
+ struct Header {
+ Flags flags;
+ Index index;
+ } M5_ATTR_PACKED;
+
+ VirtRing<T>(PortProxy &proxy, uint16_t size)
+ : ring(size), _proxy(proxy), _base(0) {}
+
+ /**
+ * Set the base address of the VirtIO ring buffer.
+ *
+ * @param addr New host physical address
+ */
+ void setAddress(Addr addr) { _base = addr; }
+
+ /** Update the ring buffer header with data from the guest. */
+ void readHeader() {
+ assert(_base != 0);
+ _proxy.readBlob(_base, (uint8_t *)&header, sizeof(header));
+ header.flags = vtoh_legacy(header.flags);
+ header.index = vtoh_legacy(header.index);
+ }
+
+ void writeHeader() {
+ Header out;
+ assert(_base != 0);
+ out.flags = htov_legacy(header.flags);
+ out.index = htov_legacy(header.index);
+ _proxy.writeBlob(_base, (uint8_t *)&out, sizeof(out));
+ }
+
+ void read() {
+ readHeader();
+
+ /* Read and byte-swap the elements in the ring */
+ T temp[ring.size()];
+ _proxy.readBlob(_base + sizeof(header),
+ (uint8_t *)temp, sizeof(T) * ring.size());
+ for (int i = 0; i < ring.size(); ++i)
+ ring[i] = vtoh_legacy(temp[i]);
+ }
+
+ void write() {
+ assert(_base != 0);
+ /* Create a byte-swapped copy of the ring and write it to
+ * guest memory. */
+ T temp[ring.size()];
+ for (int i = 0; i < ring.size(); ++i)
+ temp[i] = htov_legacy(ring[i]);
+ _proxy.writeBlob(_base + sizeof(header),
+ (uint8_t *)temp, sizeof(T) * ring.size());
+ writeHeader();
+ }
+
+ /** Ring buffer header in host byte order */
+ Header header;
+ /** Elements in ring in host byte order */
+ std::vector<T> ring;
+
+ private:
+ // Remove default constructor
+ VirtRing<T>();
+
+ /** Guest physical memory proxy */
+ PortProxy &_proxy;
+ /** Guest physical base address of the ring buffer */
+ Addr _base;
+ };
+
+ /** Ring of available (incoming) descriptors */
+ VirtRing<VirtDescriptor::Index> avail;
+ /** Ring of used (outgoing) descriptors */
+ VirtRing<struct vring_used_elem> used;
+
+ /** Offset of last consumed descriptor in the VirtQueue::avail
+ * ring */
+ uint16_t _last_avail;
+
+ /** Vector of pre-created descriptors indexed by their index into
+ * the queue. */
+ std::vector<VirtDescriptor> descriptors;
+};
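+
+/*
+ * Illustrative sketch (not part of the interface above): device
+ * models normally derive their queues from VirtQueue and override
+ * onNotifyDescriptor() to handle one chain at a time. The class name
+ * below is made up for the example:
+ *
+ *     class DummyQueue : public VirtQueue
+ *     {
+ *       public:
+ *         DummyQueue(PortProxy &proxy, uint16_t size)
+ *             : VirtQueue(proxy, size) {}
+ *
+ *         void onNotifyDescriptor(VirtDescriptor *desc) {
+ *             // Consume the incoming chain here, then hand it back
+ *             // to the guest.
+ *             produceDescriptor(desc, 0);
+ *         }
+ *     };
+ *
+ * The owning device typically calls kick() afterwards to raise the
+ * guest-facing interrupt.
+ */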
+
+/**
+ * Base class for all VirtIO-based devices.
+ *
+ * This class implements the functionality of the VirtIO 0.9.5
+ * specification. This version of VirtIO is also known as "legacy" in
+ * the VirtIO 1.0 specification from OASIS.
+ *
+ * @see https://github.com/rustyrussell/virtio-spec
+ * @see http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html
+ */
+class VirtIODeviceBase : public SimObject
+{
+ public:
+ typedef uint16_t QueueID;
+ typedef uint32_t FeatureBits;
+ /** This is a VirtQueue address as exposed through the low-level
+ * interface.\ The address needs to be multiplied by the page size
+ * (seems to be hardcoded to 4096 in the spec) to get the real
+ * physical address.
+ */
+ typedef uint16_t VirtAddress;
+ /** Device Type (sometimes known as subsystem ID) */
+ typedef uint16_t DeviceId;
+
+ BitUnion8(DeviceStatus)
+ Bitfield<7> failed;
+ Bitfield<2> driver_ok;
+ Bitfield<1> driver;
+ Bitfield<0> acknowledge;
+ EndBitUnion(DeviceStatus)
+
+ typedef VirtIODeviceBaseParams Params;
+ VirtIODeviceBase(Params *params, DeviceId id, size_t config_size,
+ FeatureBits features);
+ virtual ~VirtIODeviceBase();
+
+ public:
+ /** @{
+ * @name SimObject Interfaces
+ */
+
+ void serialize(std::ostream &os);
+ void unserialize(Checkpoint *cp, const std::string &section);
+
+ /** @} */
+
+
+ protected:
+ /** @{
+ * @name Device Model Interfaces
+ */
+
+ /**
+ * Inform the guest of available buffers.
+ *
+ * When a device model has finished processing incoming buffers
+ * (after onNotify has been called), it typically needs to inform
+ * the guest that there are new pending outgoing buffers. The
+ * method used to inform the guest is transport dependent, but is
+ * typically through an interrupt. Device models call this method
+ * to tell the transport interface to notify the guest.
+ */
+ void kick() {
+ assert(transKick);
+ transKick->process();
+ };
+
+ /**
+ * Register a new VirtQueue with the device model.
+ *
+ * Devices typically register at least one VirtQueue to use for
+ * communication with the guest. This <i>must</i> be done from the
+ * constructor since the number of queues is assumed to be
+ * constant throughout the lifetime of the device.
+ *
+ * @warning This method may only be called from the device model
+ * constructor.
+ */
+ void registerQueue(VirtQueue &queue);
+
+
+ /**
+ * Feature set accepted by the guest.
+ *
+ * When the guest starts the driver for the device, it starts by
+ * negotiating features. The device first offers a set of features
+ * (see deviceFeatures), the driver then notifies the device of
+ * which features it accepted. The base class will automatically
+ * accept any feature set that is a subset of the features offered
+ * by the device.
+ */
+ FeatureBits guestFeatures;
+ /** @} */
+
+ public:
+ /** @{
+ * @name Optional VirtIO Interfaces
+ */
+ /**
+ * Read from the configuration space of a device.
+ *
+ * This method is called by the transport interface to read data
+ * from a device model's configuration space. The device model
+ * should use the cfgOffset parameter as the offset into its
+ * configuration space.
+ *
+ * @warning The address in the packet should not be used to
+ * determine the offset into a device's configuration space.
+ *
+ * @param pkt Read request packet.
+ * @param cfgOffset Offset into the device's configuration space.
+ */
+ virtual void readConfig(PacketPtr pkt, Addr cfgOffset);
+ /**
+ * Write to the configuration space of a device.
+ *
+ * This method is called by the transport interface to write data
+ * into a device model's configuration space. The device model
+ * should use the cfgOffset parameter as the offset into its
+ * configuration space.
+ *
+ * @warning The address in the packet should not be used to
+ * determine the offset into a device's configuration space.
+ *
+ * @param pkt Write request packet.
+ * @param cfgOffset Offset into the device's configuration space.
+ */
+ virtual void writeConfig(PacketPtr pkt, Addr cfgOffset);
+
+ /**
+ * Driver-requested device reset.
+ *
+ * The device driver may reset a device by writing zero to the
+ * device status register (using setDeviceStatus()), which causes
+ * this method to be called. Device models overriding this method
+ * <i>must</i> ensure that the reset method of the base class is
+ * called when the device is reset.
+ *
+ * @note Always call the reset method of the base class from
+ * device-specific reset methods.
+ */
+ virtual void reset();
+ /** @} */
+
+ protected:
+ /** @{
+ * @name Device Model Helpers
+ */
+
+ /**
+ * Read configuration data from a device structure.
+ *
+ * @param pkt Read request packet.
+ * @param cfgOffset Offset into the device's configuration space.
+ * @param cfg Device configuration
+ */
+ void readConfigBlob(PacketPtr pkt, Addr cfgOffset, const uint8_t *cfg);
+
+ /**
+ * Write configuration data to a device structure.
+ *
+ * @param pkt Write request packet.
+ * @param cfgOffset Offset into the device's configuration space.
+ * @param cfg Device configuration
+ */
+ void writeConfigBlob(PacketPtr pkt, Addr cfgOffset, uint8_t *cfg);
+
+ /** @} */
+
+ public:
+ /** @{
+ * @name VirtIO Transport Interfaces
+ */
+ /**
+ * Register a callback to kick the guest through the transport
+ * interface.
+ *
+ * @param c Callback into transport interface.
+ */
+ void registerKickCallback(Callback *c) {
+ assert(!transKick);
+ transKick = c;
+ }
+
+
+ /**
+ * Driver is requesting service.
+ *
+ * This method is called by the underlying hardware interface
+ * (e.g., PciVirtIO or MmioVirtIO) to notify a device of pending
+ * incoming descriptors.
+ *
+ * @param index ID of the queue with pending actions.
+ */
+ void onNotify(QueueID index);
+
+
+ /**
+ * Change currently active queue.
+ *
+ * The transport interface works on a queue at a time. The
+ * currently active queue is decided by the value of the queue
+ * select field in a device.
+ *
+ * @param idx ID of the queue to select.
+ */
+ void setQueueSelect(QueueID idx) { _queueSelect = idx; }
+ /**
+ * Get the currently active queue.
+ *
+ * The transport interface works on a queue at a time. The
+ * currently active queue is decided by the value of the queue
+ * select field in a device.
+ *
+ * @return The ID of the currently active queue.
+ */
+ QueueID getQueueSelect() const { return _queueSelect; }
+
+ /**
+ * Change the host physical address of the currently active queue.
+ *
+ * @note The new address is specified in multiples of the page
+ * size (fixed to 4096 bytes in the standard). For example, if the
+ * address 10 is selected, the actual host physical address will
+ * be 40960.
+ *
+ * @see setQueueSelect
+ * @see getQueueSelect
+ *
+ * @param address New address of the currently active queue (in
+ * pages).
+ */
+ void setQueueAddress(uint32_t address);
+ /**
+ * Get the host physical address of the currently active queue.
+ *
+ * @note The new address is specified in multiples of the page
+ * size (fixed to 4096 bytes in the standard). For example, if the
+ * address 10 is selected, the actual host physical address will
+ * be 40960.
+ *
+ * @see setQueueSelect
+ * @see getQueueSelect
+ *
+ * @return Address of the currently active queue (in pages).
+ */
+ uint32_t getQueueAddress() const;
+
+ /**
+ * Get the size (descriptors) of the currently active queue.
+ *
+ * @return Size of the currently active queue in number of
+ * descriptors.
+ */
+ uint16_t getQueueSize() const { return getCurrentQueue().getSize(); }
+
+ /**
+ * Update device status and optionally reset device.
+ *
+ * The special device status of 0 is used to reset the device by
+ * calling reset().
+ *
+ * @param status New device status.
+ */
+ void setDeviceStatus(DeviceStatus status);
+
+ /**
+ * Retrieve the device status.
+ *
+ * @return Device status.
+ */
+ DeviceStatus getDeviceStatus() const { return _deviceStatus; }
+
+ /**
+ * Set feature bits accepted by the guest driver.
+ *
+ * This enables a subset of the features offered by the device
+ * model through the getGuestFeatures() interface.
+ */
+ void setGuestFeatures(FeatureBits features);
+
+ /**
+ * Get features accepted by the guest driver.
+ *
+ * @return Currently active features.
+ */
+ FeatureBits getGuestFeatures() const { return guestFeatures; }
+
+ /** Device ID (sometimes known as subsystem ID) */
+ const DeviceId deviceId;
+
+ /** Size of the device's configuration space */
+ const size_t configSize;
+
+ /** Feature set offered by the device */
+ const FeatureBits deviceFeatures;
+ /** @} */
+
+ private:
+ /** Convenience method to get the currently selected queue */
+ const VirtQueue &getCurrentQueue() const;
+ /** Convenience method to get the currently selected queue */
+ VirtQueue &getCurrentQueue();
+
+ /**
+ * Status of the device
+ *
+ * @see getDeviceStatus
+ * @see setDeviceStatus
+ */
+ DeviceStatus _deviceStatus;
+
+ /** Queue select register (set by guest) */
+ QueueID _queueSelect;
+
+ /** List of virtual queues supported by this device */
+ std::vector<VirtQueue *> _queues;
+
+ /** Callback to kick the guest through the transport layer */
+ Callback *transKick;
+};
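+
+/*
+ * Illustrative sketch (not part of the interface above): a minimal
+ * device model registers its queues from the constructor and
+ * forwards configuration accesses to the blob helpers. All names
+ * below (VirtIODummy, ID_DUMMY, DummyQueue, Config) are made up for
+ * the example:
+ *
+ *     class VirtIODummy : public VirtIODeviceBase
+ *     {
+ *       public:
+ *         VirtIODummy(Params *params)
+ *             : VirtIODeviceBase(params, ID_DUMMY, sizeof(config), 0),
+ *               queue(params->system->physProxy, 16)
+ *         {
+ *             registerQueue(queue);
+ *         }
+ *
+ *         void readConfig(PacketPtr pkt, Addr cfgOffset) {
+ *             readConfigBlob(pkt, cfgOffset, (uint8_t *)&config);
+ *         }
+ *
+ *       private:
+ *         Config config;
+ *         DummyQueue queue;
+ *     };
+ */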
+
+#endif // __DEV_VIRTIO_BASE_HH__
--- /dev/null
+/*
+ * Copyright (c) 2014 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#include "debug/VIOPci.hh"
+#include "dev/virtio/pci.hh"
+#include "mem/packet_access.hh"
+#include "params/PciVirtIO.hh"
+
+PciVirtIO::PciVirtIO(const Params *params)
+ : PciDevice(params), queueNotify(0), interruptDeliveryPending(false),
+ vio(*params->vio), callbackKick(this)
+{
+ // Override the subsystem ID with the device ID from VirtIO
+ config.subsystemID = htole(vio.deviceId);
+ BARSize[0] = BAR0_SIZE_BASE + vio.configSize;
+
+ vio.registerKickCallback(&callbackKick);
+}
+
+PciVirtIO::~PciVirtIO()
+{
+}
+
+Tick
+PciVirtIO::read(PacketPtr pkt)
+{
+ const unsigned M5_VAR_USED size(pkt->getSize());
+ int bar;
+ Addr offset;
+ if (!getBAR(pkt->getAddr(), bar, offset))
+ panic("Invalid PCI memory access to unmapped memory.\n");
+ assert(bar == 0);
+
+ DPRINTF(VIOPci, "Reading offset 0x%x [len: %i]\n", offset, size);
+
+ // Forward device configuration reads to the VirtIO device model
+ if (offset >= OFF_VIO_DEVICE) {
+ vio.readConfig(pkt, offset - OFF_VIO_DEVICE);
+ return 0;
+ }
+
+ pkt->allocate();
+
+ switch(offset) {
+ case OFF_DEVICE_FEATURES:
+ DPRINTF(VIOPci, " DEVICE_FEATURES request\n");
+ assert(size == sizeof(uint32_t));
+ pkt->set<uint32_t>(vio.deviceFeatures);
+ break;
+
+ case OFF_GUEST_FEATURES:
+ DPRINTF(VIOPci, " GUEST_FEATURES request\n");
+ assert(size == sizeof(uint32_t));
+ pkt->set<uint32_t>(vio.getGuestFeatures());
+ break;
+
+ case OFF_QUEUE_ADDRESS:
+ DPRINTF(VIOPci, " QUEUE_ADDRESS request\n");
+ assert(size == sizeof(uint32_t));
+ pkt->set<uint32_t>(vio.getQueueAddress());
+ break;
+
+ case OFF_QUEUE_SIZE:
+ DPRINTF(VIOPci, " QUEUE_SIZE request\n");
+ assert(size == sizeof(uint16_t));
+ pkt->set<uint16_t>(vio.getQueueSize());
+ break;
+
+ case OFF_QUEUE_SELECT:
+ DPRINTF(VIOPci, " QUEUE_SELECT\n");
+ assert(size == sizeof(uint16_t));
+ pkt->set<uint16_t>(vio.getQueueSelect());
+ break;
+
+ case OFF_QUEUE_NOTIFY:
+ DPRINTF(VIOPci, " QUEUE_NOTIFY request\n");
+ assert(size == sizeof(uint16_t));
+ pkt->set<uint16_t>(queueNotify);
+ break;
+
+ case OFF_DEVICE_STATUS:
+ DPRINTF(VIOPci, " DEVICE_STATUS request\n");
+ assert(size == sizeof(uint8_t));
+ pkt->set<uint8_t>(vio.getDeviceStatus());
+ break;
+
+ case OFF_ISR_STATUS: {
+ DPRINTF(VIOPci, " ISR_STATUS\n");
+ assert(size == sizeof(uint8_t));
+ uint8_t isr_status(interruptDeliveryPending ? 1 : 0);
+ interruptDeliveryPending = false;
+ pkt->set<uint8_t>(isr_status);
+ } break;
+
+ default:
+ panic("Unhandled read offset (0x%x)\n", offset);
+ }
+
+ return 0;
+}
+
+Tick
+PciVirtIO::write(PacketPtr pkt)
+{
+ const unsigned M5_VAR_USED size(pkt->getSize());
+ int bar;
+ Addr offset;
+ if (!getBAR(pkt->getAddr(), bar, offset))
+ panic("Invalid PCI memory access to unmapped memory.\n");
+ assert(bar == 0);
+
+ DPRINTF(VIOPci, "Writing offset 0x%x [len: %i]\n", offset, size);
+
+ // Forward device configuration writes to the VirtIO device model
+ if (offset >= OFF_VIO_DEVICE) {
+ vio.writeConfig(pkt, offset - OFF_VIO_DEVICE);
+ return 0;
+ }
+
+ pkt->allocate();
+
+ switch(offset) {
+ case OFF_DEVICE_FEATURES:
+ warn("Guest tried to write device features.");
+ break;
+
+ case OFF_GUEST_FEATURES:
+ DPRINTF(VIOPci, " WRITE GUEST_FEATURES request\n");
+ assert(size == sizeof(uint32_t));
+ vio.setGuestFeatures(pkt->get<uint32_t>());
+ break;
+
+ case OFF_QUEUE_ADDRESS:
+ DPRINTF(VIOPci, " WRITE QUEUE_ADDRESS\n");
+ assert(size == sizeof(uint32_t));
+ vio.setQueueAddress(pkt->get<uint32_t>());
+ break;
+
+ case OFF_QUEUE_SIZE:
+ panic("Guest tried to write queue size.");
+ break;
+
+ case OFF_QUEUE_SELECT:
+ DPRINTF(VIOPci, " WRITE QUEUE_SELECT\n");
+ assert(size == sizeof(uint16_t));
+ vio.setQueueSelect(pkt->get<uint16_t>());
+ break;
+
+ case OFF_QUEUE_NOTIFY:
+ DPRINTF(VIOPci, " WRITE QUEUE_NOTIFY\n");
+ assert(size == sizeof(uint16_t));
+ queueNotify = pkt->get<uint16_t>();
+ vio.onNotify(queueNotify);
+ break;
+
+ case OFF_DEVICE_STATUS: {
+ assert(size == sizeof(uint8_t));
+ uint8_t status(pkt->get<uint8_t>());
+ DPRINTF(VIOPci, "VirtIO set status: 0x%x\n", status);
+ vio.setDeviceStatus(status);
+ } break;
+
+ case OFF_ISR_STATUS:
+ warn("Guest tried to write ISR status.");
+ break;
+
+ default:
+ panic("Unhandled read offset (0x%x)\n", offset);
+ }
+
+ return 0;
+}
+
+void
+PciVirtIO::kick()
+{
+ DPRINTF(VIOPci, "kick(): Sending interrupt...\n");
+ interruptDeliveryPending = true;
+ intrPost();
+}
+
+PciVirtIO *
+PciVirtIOParams::create()
+{
+ return new PciVirtIO(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Sandberg
+ */
+
+#ifndef __DEV_VIRTIO_PCI_HH__
+#define __DEV_VIRTIO_PCI_HH__
+
+#include "base/statistics.hh"
+#include "dev/virtio/base.hh"
+#include "dev/pcidev.hh"
+
+struct PciVirtIOParams;
+
+class PciVirtIO : public PciDevice
+{
+ public:
+ typedef PciVirtIOParams Params;
+ PciVirtIO(const Params *params);
+ virtual ~PciVirtIO();
+
+ Tick read(PacketPtr pkt);
+ Tick write(PacketPtr pkt);
+
+ void kick();
+
+ protected:
+ /** @{ */
+ /** Offsets into VirtIO header (BAR0 relative). */
+
+ static const Addr OFF_DEVICE_FEATURES = 0x00;
+ static const Addr OFF_GUEST_FEATURES = 0x04;
+ static const Addr OFF_QUEUE_ADDRESS = 0x08;
+ static const Addr OFF_QUEUE_SIZE = 0x0C;
+ static const Addr OFF_QUEUE_SELECT = 0x0E;
+ static const Addr OFF_QUEUE_NOTIFY = 0x10;
+ static const Addr OFF_DEVICE_STATUS = 0x12;
+ static const Addr OFF_ISR_STATUS = 0x13;
+ static const Addr OFF_VIO_DEVICE = 0x14;
+
+ /** @} */
+
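+ /**
+ * Size of the VirtIO legacy header in BAR0. The device's
+ * configuration space is mapped directly after this header (see
+ * OFF_VIO_DEVICE), so the total size of BAR0 is this constant
+ * plus the size of the device's configuration space.
+ */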
+ static const Addr BAR0_SIZE_BASE = OFF_VIO_DEVICE;
+
+
+ VirtIODeviceBase::QueueID queueNotify;
+
+ bool interruptDeliveryPending;
+
+ VirtIODeviceBase &vio;
+
+ MakeCallback<PciVirtIO, &PciVirtIO::kick> callbackKick;
+};
+
+#endif // __DEV_VIRTIO_PCI_HH__
--- /dev/null
+#ifndef _VIRTIO_RING_H
+#define _VIRTIO_RING_H
+/* An interface for efficient virtio implementation, currently for use by KVM
+ * and lguest, but hopefully others soon. Do NOT change this since it will
+ * break existing servers and clients.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright Rusty Russell IBM Corporation 2007. */
+#include <stdint.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+struct vring_desc {
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+};
+
+/* u32 is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was used (written to) */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[];
+};
+
+struct vring {
+ unsigned int num;
+
+ struct vring_desc *desc;
+
+ struct vring_avail *avail;
+
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which looks
+ * like this. We assume num is a power of 2.
+ *
+ * struct vring
+ * {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * uint16_t avail_flags;
+ * uint16_t avail_idx;
+ * uint16_t available[num];
+ * uint16_t used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * uint16_t used_flags;
+ * uint16_t used_idx;
+ * struct vring_used_elem used[num];
+ * uint16_t avail_event_idx;
+ * };
+ */
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline void vring_init(struct vring *vr, unsigned int num, void *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *)p;
+ vr->avail = (struct vring_avail *)((uint8_t*)p + num*sizeof(struct vring_desc));
+ vr->used = (struct vring_used *)(((unsigned long)&vr->avail->ring[num] + sizeof(uint16_t)
+ + align-1) & ~(align - 1));
+}
+
+static inline unsigned vring_size(unsigned int num, unsigned long align)
+{
+ return ((sizeof(struct vring_desc) * num + sizeof(uint16_t) * (3 + num)
+ + align - 1) & ~(align - 1))
+ + sizeof(uint16_t) * 3 + sizeof(struct vring_used_elem) * num;
+}
+
+/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
+/* Assuming a given event_idx value from the other side, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event? */
+static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ /* Note: Xen has similar logic for notification hold-off
+ * in include/xen/interface/io/ring.h with req_event and req_prod
+ * corresponding to event_idx + 1 and new_idx respectively.
+ * Note also that req_event and req_prod in Xen start at 1,
+ * event indexes in virtio start at 0. */
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H */