From 55aad90e347599e88747888ddbefcba33427f386 Mon Sep 17 00:00:00 2001
From: Jason Wang <jasowang@redhat.com>
Date: Fri, 16 Dec 2022 11:35:52 +0800
Subject: [PATCH 12/31] vhost: fix vq dirty bitmap syncing when vIOMMU is
 enabled

RH-Author: Eric Auger <eric.auger@redhat.com>
RH-MergeRequest: 134: vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
RH-Bugzilla: 2124856
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Jason Wang <jasowang@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Commit: [1/1] 57ef499b63dc2cca6e64ee84d1dc127635868ca2 (eauger1/centos-qemu-kvm)

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2124856
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=49989924
Upstream: yes

When vIOMMU is enabled, vq->used_phys is actually an IOVA, not a
GPA. So we need to translate it to a GPA before syncing, otherwise we
may crash, since the IOVA can be out of the scope of the GPA log
size. This can be observed when using virtio-iommu with vhost and 1G
of guest memory.

Fixes: c471ad0e9bd46 ("vhost_net: device IOTLB support")
Cc: qemu-stable@nongnu.org
Tested-by: Lei Yang <leiyang@redhat.com>
Reported-by: Yalan Zhang <yalzhang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20221216033552.77087-1-jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 345cc1cbcbce2bab00abc2b88338d7d89c702d6b)
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
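Backport note (annotation only, ignored by git am; not part of the
upstream message): vhost_dev_has_iommu() is moved up unchanged so it is
visible to vhost_sync_dirty_bitmap(), and the sync now walks the used
ring one IOMMU page at a time, translating each IOVA chunk to a GPA. A
minimal standalone sketch of that chunking arithmetic, with a
hypothetical translate() stub standing in for
address_space_get_iotlb_entry() and an assumed IOVA->GPA layout:

    #include <stdio.h>
    #include <inttypes.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /*
     * Hypothetical stand-in for the vIOMMU lookup: map each 4KiB IOVA
     * page to GPA = IOVA + 1GiB and report the page mask the way
     * IOMMUTLBEntry.addr_mask does.
     */
    static uint64_t translate(uint64_t iova, uint64_t *addr_mask)
    {
        *addr_mask = 0xfff;                     /* 4KiB IOMMU page */
        return (iova & ~0xfffULL) + 0x40000000; /* translated page base */
    }

    int main(void)
    {
        uint64_t used_phys = 0x10ff0; /* IOVA of the used ring */
        uint64_t used_size = 0x30;    /* crosses an IOMMU page border */

        while (used_size) {
            uint64_t addr_mask;
            uint64_t base = translate(used_phys, &addr_mask);
            uint64_t offset = used_phys & addr_mask;
            uint64_t phys = base + offset;
            /*
             * Bytes from used_phys to the end of this IOMMU page; the
             * + 1 stays outside MIN() because addr_mask - offset + 1
             * wraps to zero when addr_mask spans the whole address
             * space, which would make the loop spin forever.
             */
            uint64_t s = MIN(addr_mask - offset, used_size - 1) + 1;

            printf("sync GPA 0x%" PRIx64 " len 0x%" PRIx64 "\n", phys, s);
            used_size -= s;
            used_phys += s;
        }
        return 0;
    }

This prints two chunks (0x10 bytes up to the page border, then 0x20),
the same split the patched vhost_sync_dirty_bitmap() would sync.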
 hw/virtio/vhost.c | 84 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 64 insertions(+), 20 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 84dbb39e07..2c566dc539 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -20,6 +20,7 @@
 #include "qemu/range.h"
 #include "qemu/error-report.h"
 #include "qemu/memfd.h"
+#include "qemu/log.h"
 #include "standard-headers/linux/vhost_types.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
@@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
     }
 }
 
+static bool vhost_dev_has_iommu(struct vhost_dev *dev)
+{
+    VirtIODevice *vdev = dev->vdev;
+
+    /*
+     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
+     * incremental memory mapping API via IOTLB API. For platform that
+     * does not have IOMMU, there's no need to enable this feature
+     * which may cause unnecessary IOTLB miss/update transactions.
+     */
+    if (vdev) {
+        return virtio_bus_device_iommu_enabled(vdev) &&
+               virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+    } else {
+        return false;
+    }
+}
+
 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                    MemoryRegionSection *section,
                                    hwaddr first,
@@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
             continue;
         }
 
-        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
-                              range_get_last(vq->used_phys, vq->used_size));
+        if (vhost_dev_has_iommu(dev)) {
+            IOMMUTLBEntry iotlb;
+            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
+            hwaddr phys, s, offset;
+
+            while (used_size) {
+                rcu_read_lock();
+                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
+                                                      used_phys,
+                                                      true,
+                                                      MEMTXATTRS_UNSPECIFIED);
+                rcu_read_unlock();
+
+                if (!iotlb.target_as) {
+                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
+                                  "failure for used_iova %"PRIx64"\n",
+                                  used_phys);
+                    return -EINVAL;
+                }
+
+                offset = used_phys & iotlb.addr_mask;
+                phys = iotlb.translated_addr + offset;
+
+                /*
+                 * Distance from start of used ring until last byte of
+                 * IOMMU page.
+                 */
+                s = iotlb.addr_mask - offset;
+                /*
+                 * Size of used ring, or of the part of it until end
+                 * of IOMMU page. To avoid zero result, do the adding
+                 * outside of MIN().
+                 */
+                s = MIN(s, used_size - 1) + 1;
+
+                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
+                                      range_get_last(phys, s));
+                used_size -= s;
+                used_phys += s;
+            }
+        } else {
+            vhost_dev_sync_region(dev, section, start_addr,
+                                  end_addr, vq->used_phys,
+                                  range_get_last(vq->used_phys, vq->used_size));
+        }
     }
     return 0;
 }
@@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
     dev->log_size = size;
 }
 
-static bool vhost_dev_has_iommu(struct vhost_dev *dev)
-{
-    VirtIODevice *vdev = dev->vdev;
-
-    /*
-     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
-     * incremental memory mapping API via IOTLB API. For platform that
-     * does not have IOMMU, there's no need to enable this feature
-     * which may cause unnecessary IOTLB miss/update transactions.
-     */
-    if (vdev) {
-        return virtio_bus_device_iommu_enabled(vdev) &&
-               virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
-    } else {
-        return false;
-    }
-}
-
 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                               hwaddr *plen, bool is_write)
 {
-- 
2.31.1