2 * Copyright (c) 2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Authors: Andreas Sandberg
40 #include "debug/VIO.hh"
41 #include "dev/virtio/base.hh"
42 #include "params/VirtIODeviceBase.hh"
// Build a proxy object for one vring descriptor: remember the port used to
// access guest memory, the queue that owns the descriptor, and the
// descriptor's index within that queue's ring.
// NOTE(review): extraction dropped the descIndex parameter line and the
// (presumably empty) constructor body — confirm against the repository.
44 VirtDescriptor::VirtDescriptor(PortProxy
&_memProxy
, VirtQueue
&_queue
,
46 : memProxy(&_memProxy
), queue(&_queue
), _index(descIndex
),
// Move constructor: implemented by delegating to the move-assignment
// operator so the member-transfer logic lives in one place.
51 VirtDescriptor::VirtDescriptor(VirtDescriptor
&&other
) noexcept
53 *this = std::forward
<VirtDescriptor
>(other
);
// Destructor. Declared noexcept, matching the move operations above.
56 VirtDescriptor::~VirtDescriptor() noexcept
// Move-assignment: transfer every member (memory proxy pointer, owning
// queue pointer, ring index, and the cached descriptor contents) from rhs.
// The members are pointers/PODs, so std::move here is effectively a copy;
// it is used for uniformity.
61 VirtDescriptor::operator=(VirtDescriptor
&&rhs
) noexcept
63 memProxy
= std::move(rhs
.memProxy
);
64 queue
= std::move(rhs
.queue
);
65 _index
= std::move(rhs
._index
);
66 desc
= std::move(rhs
.desc
);
// Re-read this descriptor from the guest's descriptor table and convert it
// from guest (legacy little-endian) to host byte order, caching the result
// in 'desc'. The descriptor lives at queue base + index * sizeof(desc).
72 VirtDescriptor::update()
74 const Addr
vq_addr(queue
->getAddress());
75 // Check if the queue has been initialized yet
79 assert(_index
< queue
->getSize());
80 const Addr
desc_addr(vq_addr
+ sizeof(desc
) * _index
);
81 vring_desc guest_desc
;
// Fetch the raw guest-endian descriptor from guest memory...
82 memProxy
->readBlob(desc_addr
, (uint8_t *)&guest_desc
, sizeof(guest_desc
));
// ...and byte-swap it into host order before caching it.
83 desc
= vtoh_legacy(guest_desc
);
85 "VirtDescriptor(%i): Addr: 0x%x, Len: %i, Flags: 0x%x, "
87 _index
, desc
.addr
, desc
.len
, desc
.flags
, desc
.next
);
// Refresh (re-read from guest memory) every descriptor reachable from this
// one by following next() links. The walk stops at the end of the chain
// (next() == NULL); if it instead arrives back at the head, the chain
// contains a cycle and we panic rather than loop forever.
91 VirtDescriptor::updateChain()
93 VirtDescriptor
*desc(this);
96 } while ((desc
= desc
->next()) != NULL
&& desc
!= this);
99 panic("Loop in descriptor chain!\n");
// Debug helper: print this descriptor's header fields, then read its whole
// payload from guest memory and hex-dump it (VIO debug flag).
103 VirtDescriptor::dump() const
108 DPRINTF(VIO
, "Descriptor[%i]: "
109 "Addr: 0x%x, Len: %i, Flags: 0x%x, Next: 0x%x\n",
110 _index
, desc
.addr
, desc
.len
, desc
.flags
, desc
.next
);
// NOTE(review): variable-length array is a GNU extension, not standard C++;
// also unbounded by desc.len, so a huge guest-supplied length would
// overflow the stack. Debug-only path, but worth confirming upstream.
113 uint8_t data
[desc
.len
];
114 read(0, data
, desc
.len
);
115 DDUMP(VIO
, data
, desc
.len
);
// Debug helper: dump every descriptor in this chain, following next()
// until the end of the chain. Unlike updateChain(), no cycle check here.
120 VirtDescriptor::dumpChain() const
125 const VirtDescriptor
*desc(this);
128 } while ((desc
= desc
->next()) != NULL
);
// Return the descriptor this one chains to: the owning queue's descriptor
// at index desc.next. (The caller-visible contract elsewhere in this file
// treats a NULL return as "end of chain".)
132 VirtDescriptor::next() const
135 return queue
->getDescriptor(desc
.next
);
// Copy 'size' bytes from this descriptor's guest buffer, starting at
// 'offset', into 'dst'. Asserts the request fits within the buffer
// (size <= len - offset) and panics if the descriptor is not readable by
// the device (i.e. it is an outgoing/device-write buffer).
142 VirtDescriptor::read(size_t offset
, uint8_t *dst
, size_t size
) const
144 DPRINTF(VIO
, "VirtDescriptor(%p, 0x%x, %i)::read: offset: %i, dst: 0x%x, size: %i\n",
145 this, desc
.addr
, desc
.len
, offset
, (long)dst
, size
);
// NOTE(review): if offset > desc.len the unsigned subtraction below wraps;
// relies on callers passing a valid offset.
146 assert(size
<= desc
.len
- offset
);
148 panic("Trying to read from outgoing buffer\n");
150 memProxy
->readBlob(desc
.addr
+ offset
, dst
, size
);
// Copy 'size' bytes from 'src' into this descriptor's guest buffer at
// 'offset'. Mirror image of read(): asserts the request fits and panics if
// the descriptor is an incoming (device-read) buffer.
154 VirtDescriptor::write(size_t offset
, const uint8_t *src
, size_t size
)
156 DPRINTF(VIO
, "VirtDescriptor(%p, 0x%x, %i)::write: offset: %i, src: 0x%x, size: %i\n",
157 this, desc
.addr
, desc
.len
, offset
, (long)src
, size
);
158 assert(size
<= desc
.len
- offset
);
160 panic("Trying to write to incoming buffer\n");
// const_cast is needed because this writeBlob overload takes a non-const
// pointer even though it only reads from the buffer.
162 memProxy
->writeBlob(desc
.addr
+ offset
, const_cast<uint8_t *>(src
), size
);
// Gather-read 'size' bytes starting at 'offset' within the descriptor
// chain rooted at this descriptor. Descriptors before the offset are
// skipped (offset is decremented by each one's size); once inside a
// descriptor, min(remaining-in-descriptor, remaining-request) bytes are
// read per step. The walk stops at end of chain, at the first
// non-incoming descriptor, or when the request is satisfied; if bytes
// remain after that, the request was larger than the readable chain and
// we panic.
166 VirtDescriptor::chainRead(size_t offset
, uint8_t *dst
, size_t size
) const
168 const VirtDescriptor
*desc(this);
169 const size_t full_size(size
);
171 if (offset
< desc
->size()) {
172 const size_t chunk_size(std::min(desc
->size() - offset
, size
));
173 desc
->read(offset
, dst
, chunk_size
);
178 offset
-= desc
->size();
180 } while ((desc
= desc
->next()) != NULL
&& desc
->isIncoming() && size
> 0);
183 panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
184 full_size
, chainSize(), offset
);
// Scatter-write 'size' bytes from 'src' into the descriptor chain starting
// at 'offset'. Same traversal scheme as chainRead(); note this version
// does not check the descriptor direction in the loop condition (only end
// of chain and remaining size). Panics if bytes remain unwritten when the
// chain is exhausted.
189 VirtDescriptor::chainWrite(size_t offset
, const uint8_t *src
, size_t size
)
191 VirtDescriptor
*desc(this);
192 const size_t full_size(size
);
194 if (offset
< desc
->size()) {
195 const size_t chunk_size(std::min(desc
->size() - offset
, size
));
196 desc
->write(offset
, src
, chunk_size
);
201 offset
-= desc
->size();
203 } while ((desc
= desc
->next()) != NULL
&& size
> 0);
206 panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
207 full_size
, chainSize(), offset
);
// Sum the buffer sizes of every descriptor in this chain (this descriptor
// and all reachable via next()), giving the chain's total byte capacity.
212 VirtDescriptor::chainSize() const
215 const VirtDescriptor
*desc(this);
217 size
+= desc
->size();
218 } while ((desc
= desc
->next()) != NULL
);
// Construct a virtqueue of 'size' entries. The ring address starts at 0
// (unmapped) until the guest programs it via setAddress(). One
// VirtDescriptor proxy is pre-created per ring slot; reserve() ensures the
// vector allocates exactly once for the emplace loop below.
225 VirtQueue::VirtQueue(PortProxy
&proxy
, uint16_t size
)
226 : _size(size
), _address(0), memProxy(proxy
),
227 avail(proxy
, size
), used(proxy
, size
),
230 descriptors
.reserve(_size
);
231 for (int i
= 0; i
< _size
; ++i
)
232 descriptors
.emplace_back(proxy
, *this, i
);
// Checkpoint the queue state: the guest-programmed ring base address and
// the index of the last consumed available-ring entry.
236 VirtQueue::serialize(CheckpointOut
&cp
) const
238 SERIALIZE_SCALAR(_address
);
239 SERIALIZE_SCALAR(_last_avail
);
// Restore the queue from a checkpoint. The address is read into a local
// (rather than via UNSERIALIZE_SCALAR into _address directly) so it can be
// passed through setAddress(), which recomputes the derived avail/used
// ring addresses.
243 VirtQueue::unserialize(CheckpointIn
&cp
)
247 paramIn(cp
, "_address", addr_in
);
248 UNSERIALIZE_SCALAR(_last_avail
);
250 // Use the address setter to ensure that the ring buffer addresses
251 // are updated as well.
// Program the queue's guest-physical base address and derive the positions
// of the avail and used rings per the legacy VirtIO layout:
//   descriptors (_size * sizeof(vring_desc)),
//   then the avail ring (header + _size uint16 entries + used_event),
//   then the used ring, rounded up to the next ALIGN_SIZE boundary.
256 VirtQueue::setAddress(Addr address
)
258 const Addr
addr_avail(address
+ _size
* sizeof(struct vring_desc
));
259 const Addr
addr_avail_end(addr_avail
+ sizeof(struct vring_avail
) +
260 _size
* sizeof(uint16_t));
// Extra sizeof(uint16_t) accounts for the trailing used_event field;
// the (ALIGN_SIZE - 1) arithmetic rounds up to the ring alignment.
261 const Addr
addr_used((addr_avail_end
+ sizeof(uint16_t) +
262 (ALIGN_SIZE
- 1)) & ~(ALIGN_SIZE
- 1));
264 avail
.setAddress(addr_avail
);
265 used
.setAddress(addr_used
);
// Pop the next pending descriptor chain from the available ring. If
// _last_avail has caught up with the guest's avail.header.index there is
// nothing pending; otherwise look up the descriptor index the guest
// published at slot (_last_avail % ring size) and return the matching
// VirtDescriptor proxy.
// NOTE(review): the wrap uses used.ring.size() to index the avail ring;
// both rings are built with the same _size in the constructor, so the
// value is correct, but avail.ring.size() would read better.
269 VirtQueue::consumeDescriptor()
272 DPRINTF(VIO
, "consumeDescriptor: _last_avail: %i, avail.idx: %i (->%i)\n",
273 _last_avail
, avail
.header
.index
,
274 avail
.ring
[_last_avail
% used
.ring
.size()]);
275 if (_last_avail
== avail
.header
.index
)
278 VirtDescriptor::Index
index(avail
.ring
[_last_avail
% used
.ring
.size()]);
281 VirtDescriptor
*d(&descriptors
[index
]);
// Return a finished descriptor chain to the guest: fill in the next free
// used-ring element with the chain's head index (and byte count 'len'),
// then advance the used ring's header index so the guest can see it.
288 VirtQueue::produceDescriptor(VirtDescriptor
*desc
, uint32_t len
)
291 DPRINTF(VIO
, "produceDescriptor: dscIdx: %i, len: %i, used.idx: %i\n",
292 desc
->index(), len
, used
.header
.index
);
294 struct vring_used_elem
&e(used
.ring
[used
.header
.index
% used
.ring
.size()]);
295 e
.id
= desc
->index();
// Publish the entry by bumping the used index last, after the element
// fields have been filled in.
297 used
.header
.index
+= 1;
// Debug helper: dump every descriptor proxy in this queue.
302 VirtQueue::dump() const
307 for (const VirtDescriptor
&d
: descriptors
)
// Called when the guest kicks this queue: drain the available ring,
// handing each pending descriptor chain to the device-specific
// onNotifyDescriptor() hook.
312 VirtQueue::onNotify()
314 DPRINTF(VIO
, "onNotify\n");
316 // Consume all pending descriptors from the input queue.
318 while ((d
= consumeDescriptor()) != NULL
)
319 onNotifyDescriptor(d
);
// Base-class constructor for all VirtIO devices: record the device ID, the
// size of the device-specific config space, and the feature bits the
// device offers. Status and queue-select registers start at zero (reset
// state).
323 VirtIODeviceBase::VirtIODeviceBase(Params
*params
, DeviceId id
,
324 size_t config_size
, FeatureBits features
)
327 deviceId(id
), configSize(config_size
), deviceFeatures(features
),
328 _deviceStatus(0), _queueSelect(0),
// Destructor for the abstract device base class.
334 VirtIODeviceBase::~VirtIODeviceBase()
// Checkpoint the device: negotiated guest features, status and queue
// select registers, plus a per-queue subsection ("_queues.N") for each
// registered virtqueue.
339 VirtIODeviceBase::serialize(CheckpointOut
&cp
) const
341 SERIALIZE_SCALAR(guestFeatures
);
342 SERIALIZE_SCALAR(_deviceStatus
);
343 SERIALIZE_SCALAR(_queueSelect
);
344 for (QueueID i
= 0; i
< _queues
.size(); ++i
)
345 _queues
[i
]->serializeSection(cp
, csprintf("_queues.%i", i
));
// Restore the device from a checkpoint; mirror of serialize() above —
// the queue subsections use the same "_queues.N" naming.
349 VirtIODeviceBase::unserialize(CheckpointIn
&cp
)
351 UNSERIALIZE_SCALAR(guestFeatures
);
352 UNSERIALIZE_SCALAR(_deviceStatus
);
353 UNSERIALIZE_SCALAR(_queueSelect
);
354 for (QueueID i
= 0; i
< _queues
.size(); ++i
)
355 _queues
[i
]->unserializeSection(cp
, csprintf("_queues.%i", i
));
// Device reset: clear every queue's guest address (0 == unmapped).
// (Other register resets are handled outside this visible span.)
359 VirtIODeviceBase::reset()
365 for (QueueID i
= 0; i
< _queues
.size(); ++i
)
366 _queues
[i
]->setAddress(0);
// Guest kicked queue 'idx': validate the index against the number of
// registered queues (panic on out-of-range) and forward the notification
// to that queue.
370 VirtIODeviceBase::onNotify(QueueID idx
)
372 DPRINTF(VIO
, "onNotify: idx: %i\n", idx
);
373 if (idx
>= _queues
.size()) {
374 panic("Guest tried to notify queue (%i), but only %i "
375 "queues registered.\n",
376 idx
, _queues
.size());
378 _queues
[idx
]->onNotify();
// Record the feature bits the guest driver accepted. Any bit set by the
// guest that the device did not offer (~deviceFeatures & features) is a
// protocol violation and panics.
382 VirtIODeviceBase::setGuestFeatures(FeatureBits features
)
384 DPRINTF(VIO
, "Setting guest features: 0x%x\n", features
)
386 panic("Guest tried to enable unsupported features:\n"
387 "Device features: 0x%x\n"
388 "Requested features: 0x%x\n",
389 deviceFeatures
, features
);
391 guestFeatures
= features
;
// Store the guest-written device status register and trace its
// ACKNOWLEDGE / DRIVER / DRIVER_OK / FAILED bits.
396 VirtIODeviceBase::setDeviceStatus(DeviceStatus status
)
398 _deviceStatus
= status
;
399 DPRINTF(VIO
, "ACK: %i, DRIVER: %i, DRIVER_OK: %i, FAILED: %i\n",
400 status
.acknowledge
, status
.driver
, status
.driver_ok
, status
.failed
);
// Default config-space read handler: devices with a config space must
// override this; the base class treats any read as an error.
406 VirtIODeviceBase::readConfig(PacketPtr pkt
, Addr cfgOffset
)
408 panic("Unhandled device config read (offset: 0x%x).\n", cfgOffset
);
// Default config-space write handler; like readConfig(), must be
// overridden by devices that expose a config space.
412 VirtIODeviceBase::writeConfig(PacketPtr pkt
, Addr cfgOffset
)
414 panic("Unhandled device config write (offset: 0x%x).\n", cfgOffset
);
// Helper for subclasses: satisfy a config read from a flat byte buffer
// 'cfg'. Bounds-checks the access against configSize, then copies
// pkt->getSize() bytes starting at cfgOffset into the packet.
// const_cast is needed because setData takes a non-const pointer even
// though it only reads from it.
418 VirtIODeviceBase::readConfigBlob(PacketPtr pkt
, Addr cfgOffset
, const uint8_t *cfg
)
420 const unsigned size(pkt
->getSize());
422 if (cfgOffset
+ size
> configSize
)
423 panic("Config read out of bounds.\n");
426 pkt
->setData(const_cast<uint8_t *>(cfg
) + cfgOffset
);
// Helper for subclasses: apply a config write to a flat byte buffer
// 'cfg'. Bounds-checks against configSize, then copies the packet's
// payload into cfg at cfgOffset.
430 VirtIODeviceBase::writeConfigBlob(PacketPtr pkt
, Addr cfgOffset
, uint8_t *cfg
)
432 const unsigned size(pkt
->getSize());
434 if (cfgOffset
+ size
> configSize
)
435 panic("Config write out of bounds.\n");
438 pkt
->writeData((uint8_t *)cfg
+ cfgOffset
);
// Return the queue selected by the guest via the queue-select register
// (const overload). Panics if the guest selected a queue that was never
// registered.
443 VirtIODeviceBase::getCurrentQueue() const
445 if (_queueSelect
>= _queues
.size())
446 panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect
);
448 return *_queues
[_queueSelect
];
// Non-const overload of getCurrentQueue(); identical bounds check and
// lookup as the const version above.
452 VirtIODeviceBase::getCurrentQueue()
454 if (_queueSelect
>= _queues
.size())
455 panic("Guest tried to access non-existing VirtQueue (%i).\n", _queueSelect
);
457 return *_queues
[_queueSelect
];
// Guest programs the selected queue's base address in units of
// ALIGN_SIZE pages; convert to a byte address before storing.
461 VirtIODeviceBase::setQueueAddress(uint32_t address
)
463 getCurrentQueue().setAddress(address
* VirtQueue::ALIGN_SIZE
);
467 VirtIODeviceBase::getQueueAddress() const
469 Addr
address(getCurrentQueue().getAddress());
470 assert(!(address
& ((1 >> VirtQueue::ALIGN_BITS
) - 1)));
471 return address
>> VirtQueue::ALIGN_BITS
;
// Register a virtqueue with this device; queues are addressed by their
// registration order (QueueID) in onNotify()/serialize()/reset().
// The device stores a pointer, so the queue must outlive the device.
475 VirtIODeviceBase::registerQueue(VirtQueue
&queue
)
477 _queues
.push_back(&queue
);