From 760169d538a4e6ba61006f6796cd55af967a7f1e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Thu, 15 Dec 2022 12:31:38 +0100
Subject: [PATCH 06/14] vdpa: request iova_range only once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Eugenio Pérez <eperezma@redhat.com>
RH-MergeRequest: 136: vDPA ASID support in Qemu
RH-Bugzilla: 2104412
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [6/13] 2a8ae2f46ae88f01c5535038f38cb7895098b610 (eperezmartin/qemu-kvm)

Currently iova range is requested once per queue pair in the case of
net. Reduce the number of ioctls asking it once at initialization and
reusing that value for each vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20221215113144.322011-7-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasonwang@redhat.com>
(cherry picked from commit a585fad26b2e6ccca156d9e65158ad1c5efd268d)
---
 hw/virtio/vhost-vdpa.c | 15 ---------------
 net/vhost-vdpa.c       | 27 ++++++++++++++-------------
 2 files changed, 14 insertions(+), 28 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index e65603022f..9e7cbf1776 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -365,19 +365,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
     return 0;
 }
 
-static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
-{
-    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
-                              &v->iova_range);
-    if (ret != 0) {
-        v->iova_range.first = 0;
-        v->iova_range.last = UINT64_MAX;
-    }
-
-    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
-                                    v->iova_range.last);
-}
-
 /*
  * The use of this function is for requests that only need to be
  * applied once. Typically such request occurs at the beginning
@@ -465,8 +452,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
         goto err;
     }
 
-    vhost_vdpa_get_iova_range(v);
-
     if (!vhost_vdpa_first_dev(dev)) {
         return 0;
     }
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 16a5ebe2dd..8d3ed095d0 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -549,14 +549,15 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
 };
 
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
-                                           const char *device,
-                                           const char *name,
-                                           int vdpa_device_fd,
-                                           int queue_pair_index,
-                                           int nvqs,
-                                           bool is_datapath,
-                                           bool svq,
-                                           VhostIOVATree *iova_tree)
+                                       const char *device,
+                                       const char *name,
+                                       int vdpa_device_fd,
+                                       int queue_pair_index,
+                                       int nvqs,
+                                       bool is_datapath,
+                                       bool svq,
+                                       struct vhost_vdpa_iova_range iova_range,
+                                       VhostIOVATree *iova_tree)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -575,6 +576,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->vhost_vdpa.device_fd = vdpa_device_fd;
     s->vhost_vdpa.index = queue_pair_index;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
+    s->vhost_vdpa.iova_range = iova_range;
     s->vhost_vdpa.iova_tree = iova_tree;
     if (!is_datapath) {
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
@@ -654,6 +656,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     int vdpa_device_fd;
     g_autofree NetClientState **ncs = NULL;
     g_autoptr(VhostIOVATree) iova_tree = NULL;
+    struct vhost_vdpa_iova_range iova_range;
     NetClientState *nc;
     int queue_pairs, r, i = 0, has_cvq = 0;
 
@@ -697,14 +700,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         return queue_pairs;
     }
 
+    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
     if (opts->x_svq) {
-        struct vhost_vdpa_iova_range iova_range;
-
         if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
             goto err_svq;
         }
 
-        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
         iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
     }
 
@@ -713,7 +714,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_tree);
+                                     iova_range, iova_tree);
         if (!ncs[i])
             goto err;
     }
@@ -721,7 +722,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_tree);
+                                 opts->x_svq, iova_range, iova_tree);
         if (!nc)
             goto err;
     }
-- 
2.31.1
