From 0c19fb7c4a22a30830152b224b2e66963f829a7a Mon Sep 17 00:00:00 2001
From: Greg Kurz <groug@kaod.org>
Date: Thu, 19 Jan 2023 18:24:24 +0100
Subject: [PATCH 19/20] Revert "vhost-user: Introduce nested event loop in
 vhost_user_read()"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 146: Fix vhost-user with dpdk
RH-Bugzilla: 2155173
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Greg Kurz (RH) <gkurz@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [2/2] 9b67041f92f29f70b7ccb41d8087801e4e4e38af (lvivier/qemu-kvm-centos)

This reverts commit a7f523c7d114d445c5d83aecdba3efc038e5a692.

The nested event loop is broken by design. Its only user was removed.
Drop the code as well so that nobody ever tries to use it again.

I had to fix a couple of trivial conflicts around return values because
of 025faa872bcf ("vhost-user: stick to -errno error return convention").

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <20230119172424.478268-3-groug@kaod.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 4382138f642f69fdbc79ebf4e93d84be8061191f)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/virtio/vhost-user.c | 65 ++++--------------------------------------
 1 file changed, 5 insertions(+), 60 deletions(-)

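As background for the diff below: a minimal, self-contained sketch of the
nested GLib event loop pattern that vhost_user_read() used and that this
revert removes. It is an illustration only, not part of the patch to apply;
the names one_shot_cb and run_nested_once are invented for the example. It
mirrors the reverted code's shape: build a private GMainContext, attach a
source to it, and spin a fresh GMainLoop until the callback quits it. The
hazard is that g_main_loop_run() blocks inside a caller that is itself
typically running under another main loop, so any handlers migrated onto the
private context must tolerate re-entrancy, which chr's fd handlers were not
guaranteed to do.

#include <glib.h>

/* One-shot callback: wake the nested loop, then detach the source. */
static gboolean one_shot_cb(gpointer opaque)
{
    g_main_loop_quit(opaque);
    return G_SOURCE_REMOVE;
}

static void run_nested_once(void)
{
    /* Private context so we never dispatch the caller's handlers. */
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    GSource *src = g_idle_source_new();   /* stands in for the fd watch */

    g_source_set_callback(src, one_shot_cb, loop, NULL);
    g_source_attach(src, ctxt);
    g_source_unref(src);

    g_main_loop_run(loop);   /* blocks here: this is the nested loop */

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);
}

int main(void)
{
    run_nested_once();
    return 0;
}
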
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 0ac00eb901..7cb49c50f9 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -305,19 +305,8 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
     return 0;
 }
 
-struct vhost_user_read_cb_data {
-    struct vhost_dev *dev;
-    VhostUserMsg *msg;
-    GMainLoop *loop;
-    int ret;
-};
-
-static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
-                                   gpointer opaque)
+static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 {
-    struct vhost_user_read_cb_data *data = opaque;
-    struct vhost_dev *dev = data->dev;
-    VhostUserMsg *msg = data->msg;
     struct vhost_user *u = dev->opaque;
     CharBackend *chr = u->user->chr;
     uint8_t *p = (uint8_t *) msg;
@@ -325,8 +314,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
 
     r = vhost_user_read_header(dev, msg);
     if (r < 0) {
-        data->ret = r;
-        goto end;
+        return r;
     }
 
     /* validate message size is sane */
@@ -334,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
         error_report("Failed to read msg header."
                      " Size %d exceeds the maximum %zu.", msg->hdr.size,
                      VHOST_USER_PAYLOAD_SIZE);
-        data->ret = -EPROTO;
-        goto end;
+        return -EPROTO;
     }
 
     if (msg->hdr.size) {
@@ -346,53 +333,11 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
             int saved_errno = errno;
             error_report("Failed to read msg payload."
                          " Read %d instead of %d.", r, msg->hdr.size);
-            data->ret = r < 0 ? -saved_errno : -EIO;
-            goto end;
+            return r < 0 ? -saved_errno : -EIO;
         }
     }
 
-end:
-    g_main_loop_quit(data->loop);
-    return G_SOURCE_REMOVE;
-}
-
-static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
-{
-    struct vhost_user *u = dev->opaque;
-    CharBackend *chr = u->user->chr;
-    GMainContext *prev_ctxt = chr->chr->gcontext;
-    GMainContext *ctxt = g_main_context_new();
-    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
-    struct vhost_user_read_cb_data data = {
-        .dev = dev,
-        .loop = loop,
-        .msg = msg,
-        .ret = 0
-    };
-
-    /*
-     * We want to be able to monitor the slave channel fd while waiting
-     * for chr I/O. This requires an event loop, but we can't nest the
-     * one to which chr is currently attached : its fd handlers might not
-     * be prepared for re-entrancy. So we create a new one and switch chr
-     * to use it.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
-    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
-
-    g_main_loop_run(loop);
-
-    /*
-     * Restore the previous event loop context. This also destroys/recreates
-     * event sources : this guarantees that all pending events in the original
-     * context that have been processed by the nested loop are purged.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
-
-    g_main_loop_unref(loop);
-    g_main_context_unref(ctxt);
-
-    return data.ret;
+    return 0;
 }
 
 static int process_message_reply(struct vhost_dev *dev,
-- 
2.31.1