From 56f4bebc591893e590481617da7cd7ecffeb166d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Tue, 23 Aug 2022 20:30:34 +0200
Subject: [PATCH 19/23] vdpa: extract vhost_vdpa_net_cvq_add from
 vhost_vdpa_net_handle_ctrl_avail
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Eugenio Pérez <eperezma@redhat.com>
RH-MergeRequest: 116: vdpa: Restore device state on destination
RH-Bugzilla: 2114060
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [18/21] 08ab71dbf050f5c2e97c622d1915f71a56c135b8 (eperezmartin/qemu-kvm)
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2114060
Upstream status: git@github.com:jasowang/qemu.git net-next

So we can reuse it to inject state messages.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
--
v7:
* Remove double free error

v6:
* Do not assume in buffer sent to the device is sizeof(virtio_net_ctrl_ack)

v5:
* Do not use an artificial !NULL VirtQueueElement
* Use only out size instead of iovec dev_buffers for these functions.

Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit d9afb1f0ee4d662ed67d3bc1220b943f7e4cfa6f)
---
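For context on the reuse mentioned in the commit message ("so we can reuse it
to inject state messages"): the sketch below shows roughly how a later caller
could use the extracted vhost_vdpa_net_cvq_add() to send a control command
built by QEMU itself, with no guest VirtQueueElement involved. It is an
illustration only, not part of this patch; the function name
vhost_vdpa_net_inject_mac and the MAC-set example are hypothetical, and it
assumes the cvq_cmd_out_buffer / cvq_cmd_in_buffer layout already used in
net/vhost-vdpa.c (a struct virtio_net_ctrl_hdr followed by the command
payload, with the device ack landing in the in buffer) and that ETH_ALEN and
the standard virtio-net control definitions are in scope.

/* Hypothetical caller, not part of this patch: inject a MAC-set command */
static int vhost_vdpa_net_inject_mac(VhostVDPAState *s, const uint8_t *mac)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = VIRTIO_NET_CTRL_MAC,
        .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
    };
    virtio_net_ctrl_ack status;
    ssize_t dev_written;

    /* Build the command in the pre-mapped out buffer */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy((uint8_t *)s->cvq_cmd_out_buffer + sizeof(ctrl), mac, ETH_ALEN);

    /* Send it through the shadow CVQ and wait for the device ack */
    dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + ETH_ALEN,
                                         sizeof(virtio_net_ctrl_ack));
    if (dev_written < 0) {
        return dev_written;
    }
    if (dev_written < sizeof(status)) {
        return -EIO;
    }

    memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
    return status == VIRTIO_NET_OK ? 0 : -EIO;
}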
 net/vhost-vdpa.c | 59 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 38 insertions(+), 21 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 17626feb8d..f09f044ec1 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -331,6 +331,38 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     }
 }
 
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
+                                      size_t in_len)
+{
+    /* Buffers for the device */
+    const struct iovec out = {
+        .iov_base = s->cvq_cmd_out_buffer,
+        .iov_len = out_len,
+    };
+    const struct iovec in = {
+        .iov_base = s->cvq_cmd_in_buffer,
+        .iov_len = sizeof(virtio_net_ctrl_ack),
+    };
+    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+    int r;
+
+    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+    if (unlikely(r != 0)) {
+        if (unlikely(r == -ENOSPC)) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
+                          __func__);
+        }
+        return r;
+    }
+
+    /*
+     * We can poll here since we've had BQL from the time we sent the
+     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
+     * when BQL is released
+     */
+    return vhost_svq_poll(svq);
+}
+
 static NetClientInfo net_vhost_vdpa_cvq_info = {
     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
     .size = sizeof(VhostVDPAState),
@@ -387,23 +419,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                             void *opaque)
 {
     VhostVDPAState *s = opaque;
-    size_t in_len, dev_written;
+    size_t in_len;
     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
     /* Out buffer sent to both the vdpa device and the device model */
     struct iovec out = {
         .iov_base = s->cvq_cmd_out_buffer,
     };
-    /* In buffer sent to the device */
-    const struct iovec dev_in = {
-        .iov_base = s->cvq_cmd_in_buffer,
-        .iov_len = sizeof(virtio_net_ctrl_ack),
-    };
     /* in buffer used for device model */
     const struct iovec in = {
         .iov_base = &status,
         .iov_len = sizeof(status),
     };
-    int r = -EINVAL;
+    ssize_t dev_written = -EINVAL;
     bool ok;
 
     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -414,21 +441,11 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         goto out;
     }
 
-    r = vhost_svq_add(svq, &out, 1, &dev_in, 1, elem);
-    if (unlikely(r != 0)) {
-        if (unlikely(r == -ENOSPC)) {
-            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
-                          __func__);
-        }
+    dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+    if (unlikely(dev_written < 0)) {
         goto out;
     }
 
-    /*
-     * We can poll here since we've had BQL from the time we sent the
-     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
-     * when BQL is released
-     */
-    dev_written = vhost_svq_poll(svq);
     if (unlikely(dev_written < sizeof(status))) {
         error_report("Insufficient written data (%zu)", dev_written);
         goto out;
@@ -436,7 +453,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
 
     memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
     if (status != VIRTIO_NET_OK) {
-        goto out;
+        return VIRTIO_NET_ERR;
     }
 
     status = VIRTIO_NET_ERR;
@@ -453,7 +470,7 @@ out:
     }
     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
     g_free(elem);
-    return r;
+    return dev_written < 0 ? dev_written : 0;
 }
 
 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
-- 
2.31.1