From 293e249644c14b2bd19dd6a3f08a0e18ec040200 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Fri, 3 Mar 2023 18:24:32 +0100
Subject: [PATCH 1/4] vdpa net: move iova tree creation from init to start
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 298: Fix qemu core dump with "x-svq=on" when hot-plugging a NIC
RH-Jira: RHEL-1060
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [1/3] f85a05eb528820adf4a3c0cad2950b4ab500d5fe

JIRA: https://issues.redhat.com/browse/RHEL-1060

Only create iova_tree if and when it is needed.

The cleanup keeps being responsible for the last VQ but this change
allows it to merge both cleanup functions.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20230303172445.1089785-2-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 00ef422e9fbfef1fb40447b08826db0951d788dd)

Conflicts
	because of missing commit
	bf7a2ad8b6df ("vdpa: harden the error path if get_iova_range failed")

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
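Note: the iova_tree ownership rule this patch introduces (the first queue
pair creates the tree on start, every other client borrows the same
pointer, and the client owning the device's last vq deletes it on stop)
can be modeled in isolation. Below is a minimal stand-alone sketch,
assuming made-up Client and IovaTree types as stand-ins for
VhostVDPAState and VhostIOVATree; it is not the QEMU API, only an
illustration of the lifecycle:

/* Hypothetical sketch of the shared iova_tree lifecycle; not QEMU code. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct IovaTree { int unused; } IovaTree; /* stand-in type */

typedef struct Client {
    int vq_index;       /* first vq owned by this client */
    int nvqs;           /* how many vqs it owns */
    int vq_index_end;   /* one past the device's last vq */
    IovaTree *tree;     /* created once, shared by all clients */
} Client;

static void client_start(Client *c, Client *first)
{
    if (c->vq_index == 0) {
        c->tree = calloc(1, sizeof(*c->tree)); /* first start: create */
    } else {
        c->tree = first->tree;                 /* later starts: borrow */
    }
}

static void client_stop(Client *c)
{
    if (c->vq_index + c->nvqs == c->vq_index_end) {
        free(c->tree);                         /* owner of last vq: delete */
    }
    c->tree = NULL;
}

int main(void)
{
    Client data = { .vq_index = 0, .nvqs = 2, .vq_index_end = 3 };
    Client cvq  = { .vq_index = 2, .nvqs = 1, .vq_index_end = 3 };

    client_start(&data, &data);
    client_start(&cvq, &data);
    assert(data.tree && data.tree == cvq.tree); /* one shared tree */

    client_stop(&data); /* not the last vq: tree stays alive */
    client_stop(&cvq);  /* owns the last vq: tree is freed exactly once */
    puts("shared tree lifecycle ok");
    return 0;
}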
 net/vhost-vdpa.c | 113 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 83 insertions(+), 30 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 1b4fec59a2..a914348e2a 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -178,7 +178,6 @@ err_init:
 static void vhost_vdpa_cleanup(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
-    struct vhost_dev *dev = &s->vhost_net->dev;
 
     /*
      * If a peer NIC is attached, do not cleanup anything.
@@ -190,9 +189,6 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
     }
     qemu_vfree(s->cvq_cmd_out_buffer);
     qemu_vfree(s->status);
-    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
-        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
-    }
     if (s->vhost_net) {
         vhost_net_cleanup(s->vhost_net);
         g_free(s->vhost_net);
@@ -242,10 +238,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
     return size;
 }
 
+/** From any vdpa net client, get the netclient of the first queue pair */
+static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
+{
+    NICState *nic = qemu_get_nic(s->nc.peer);
+    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
+
+    return DO_UPCAST(VhostVDPAState, nc, nc0);
+}
+
+static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
+{
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (v->shadow_vqs_enabled) {
+        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                           v->iova_range.last);
+    }
+}
+
+static int vhost_vdpa_net_data_start(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (v->index == 0) {
+        vhost_vdpa_net_data_start_first(s);
+        return 0;
+    }
+
+    if (v->shadow_vqs_enabled) {
+        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
+        v->iova_tree = s0->vhost_vdpa.iova_tree;
+    }
+
+    return 0;
+}
+
+static void vhost_vdpa_net_client_stop(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_dev *dev;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    dev = s->vhost_vdpa.dev;
+    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
+        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
+        .start = vhost_vdpa_net_data_start,
+        .stop = vhost_vdpa_net_client_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
         .has_ufo = vhost_vdpa_has_ufo,
@@ -359,7 +409,7 @@ dma_map_err:
 
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
-    VhostVDPAState *s;
+    VhostVDPAState *s, *s0;
     struct vhost_vdpa *v;
     uint64_t backend_features;
     int64_t cvq_group;
@@ -423,8 +473,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
         return r;
     }
 
-    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                       v->iova_range.last);
     v->shadow_vqs_enabled = true;
     s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
 
@@ -433,6 +481,27 @@ out:
         return 0;
     }
 
+    s0 = vhost_vdpa_net_first_nc_vdpa(s);
+    if (s0->vhost_vdpa.iova_tree) {
+        /*
+         * SVQ is already configured for all virtqueues. Reuse IOVA tree for
+         * simplicity, whether CVQ shares ASID with guest or not, because:
+         * - Memory listener need access to guest's memory addresses allocated
+         *   in the IOVA tree.
+         * - There should be plenty of IOVA address space for both ASID not to
+         *   worry about collisions between them. Guest's translations are
+         *   still validated with virtio virtqueue_pop so there is no risk for
+         *   the guest to access memory that it shouldn't.
+         *
+         * To allocate a iova tree per ASID is doable but it complicates the
+         * code and it is not worth it for the moment.
+         */
+        v->iova_tree = s0->vhost_vdpa.iova_tree;
+    } else {
+        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                           v->iova_range.last);
+    }
+
     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                                vhost_vdpa_net_cvq_cmd_page_len(), false);
     if (unlikely(r < 0)) {
@@ -457,15 +526,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     if (s->vhost_vdpa.shadow_vqs_enabled) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
-        if (!s->always_svq) {
-            /*
-             * If only the CVQ is shadowed we can delete this safely.
-             * If all the VQs are shadows this will be needed by the time the
-             * device is started again to register SVQ vrings and similar.
-             */
-            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
-        }
     }
+
+    vhost_vdpa_net_client_stop(nc);
 }
 
 static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
@@ -675,8 +738,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                        int nvqs,
                                        bool is_datapath,
                                        bool svq,
-                                       struct vhost_vdpa_iova_range iova_range,
-                                       VhostIOVATree *iova_tree)
+                                       struct vhost_vdpa_iova_range iova_range)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -698,7 +760,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->vhost_vdpa.shadow_vqs_enabled = svq;
     s->vhost_vdpa.iova_range = iova_range;
     s->vhost_vdpa.shadow_data = svq;
-    s->vhost_vdpa.iova_tree = iova_tree;
     if (!is_datapath) {
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                               vhost_vdpa_net_cvq_cmd_page_len());
@@ -776,7 +837,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     uint64_t features;
     int vdpa_device_fd;
     g_autofree NetClientState **ncs = NULL;
-    g_autoptr(VhostIOVATree) iova_tree = NULL;
     struct vhost_vdpa_iova_range iova_range;
     NetClientState *nc;
     int queue_pairs, r, i = 0, has_cvq = 0;
@@ -822,12 +882,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     }
 
     vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
-    if (opts->x_svq) {
-        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
-            goto err_svq;
-        }
-
-        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
+        goto err;
     }
 
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
@@ -835,7 +891,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_range, iova_tree);
+                                     iova_range);
         if (!ncs[i])
             goto err;
     }
@@ -843,13 +899,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_range, iova_tree);
+                                 opts->x_svq, iova_range);
         if (!nc)
             goto err;
     }
 
-    /* iova_tree ownership belongs to last NetClientState */
-    g_steal_pointer(&iova_tree);
     return 0;
 
 err:
@@ -859,7 +913,6 @@ err:
         }
     }
 
-err_svq:
     qemu_close(vdpa_device_fd);
 
     return -1;
--
2.39.3