import qemu-kvm-7.2.0-14.el9_2.3

commit 5752c9931d (parent fe66ecbf55)
Author: MSVSphere Packaging Team

kvm-kvm-reuse-per-vcpu-stats-fd-to-avoid-vcpu-interrupti.patch
@@ -0,0 +1,155 @@
From 6319eaee8c2206c4eca858a11ed7c9b7a2f3dff9 Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti <mtosatti@redhat.com>
Date: Thu, 29 Jun 2023 15:13:57 -0300
Subject: [PATCH] kvm: reuse per-vcpu stats fd to avoid vcpu interruption

RH-Author: Marcelo Tosatti <None>
RH-MergeRequest: 290: kvm: reuse per-vcpu stats fd to avoid vcpu interruption
RH-Bugzilla: 2221219
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [1/1] cf6c3188e26c6eae99b48db1f75837e11d1e4489
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2214884
Commit: 3b6f485275ae95a81eec589d2773b86ca9ddec4d

A regression has been detected in latency testing of KVM guests.
More specifically, it was observed that the cyclictest
numbers inside of an isolated vcpu (running on isolated pcpu) are:

Where a maximum of 50us is acceptable.

The implementation of KVM_GET_STATS_FD uses run_on_cpu to query
per-vcpu statistics, which interrupts the vcpu (and is unnecessary).

To fix this, open the per-vcpu stats fd on vcpu initialization,
and read from that fd from QEMU's main thread.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
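
The pattern the patch applies can be condensed into a standalone sketch
(hypothetical names, not the QEMU API; the real change lives in
kvm_init_vcpu() and query_stats_vcpu() below): fetch the stats fd once
when the vcpu is set up, then read it with pread() from the main thread,
so no cross-thread run_on_cpu() call is needed and the fd is never
re-opened per query.

    /* Minimal sketch of the fd-reuse pattern (hypothetical names). */
    #include <unistd.h>

    struct vcpu {
        int stats_fd;   /* cached per-vcpu stats fd, -1 if unavailable */
    };

    static void vcpu_init_stats(struct vcpu *v, int fd_from_ioctl)
    {
        /* e.g. the fd returned by ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL) */
        v->stats_fd = fd_from_ioctl;
    }

    static ssize_t vcpu_read_stats(struct vcpu *v, void *buf, size_t len)
    {
        if (v->stats_fd == -1) {
            return -1;
        }
        /* pread() at an explicit offset: no seek state is consumed, so
         * repeated queries from the main thread always see the header. */
        return pread(v->stats_fd, buf, len, 0);
    }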
---
accel/kvm/kvm-all.c | 30 +++++++++++++++---------------
include/hw/core/cpu.h | 1 +
2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 39ed30ab59..c86a6798c6 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -450,6 +450,8 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
"kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu));
}
+ cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+
err:
return ret;
}
@@ -3950,7 +3952,7 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
/* Read stats header */
kvm_stats_header = &descriptors->kvm_stats_header;
- ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
+ ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
if (ret != sizeof(*kvm_stats_header)) {
error_setg(errp, "KVM stats: failed to read stats header: "
"expected %zu actual %zu",
@@ -3981,7 +3983,8 @@ static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd
}
static void query_stats(StatsResultList **result, StatsTarget target,
- strList *names, int stats_fd, Error **errp)
+ strList *names, int stats_fd, CPUState *cpu,
+ Error **errp)
{
struct kvm_stats_desc *kvm_stats_desc;
struct kvm_stats_header *kvm_stats_header;
@@ -4039,7 +4042,7 @@ static void query_stats(StatsResultList **result, StatsTarget target,
break;
case STATS_TARGET_VCPU:
add_stats_entry(result, STATS_PROVIDER_KVM,
- current_cpu->parent_obj.canonical_path,
+ cpu->parent_obj.canonical_path,
stats_list);
break;
default:
@@ -4076,10 +4079,9 @@ static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}
-static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
+static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
- StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
- int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
@@ -4088,14 +4090,13 @@ static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
return;
}
query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
- kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
- close(stats_fd);
+ kvm_stats_args->names, stats_fd, cpu,
+ kvm_stats_args->errp);
}
-static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
+static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
- StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
- int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ int stats_fd = cpu->kvm_vcpu_stats_fd;
Error *local_err = NULL;
if (stats_fd == -1) {
@@ -4105,7 +4106,6 @@ static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
}
query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
kvm_stats_args->errp);
- close(stats_fd);
}
static void query_stats_cb(StatsResultList **result, StatsTarget target,
@@ -4123,7 +4123,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
error_setg_errno(errp, errno, "KVM stats: ioctl failed");
return;
}
- query_stats(result, target, names, stats_fd, errp);
+ query_stats(result, target, names, stats_fd, NULL, errp);
close(stats_fd);
break;
}
@@ -4137,7 +4137,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
continue;
}
- run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+ query_stats_vcpu(cpu, &stats_args);
}
break;
}
@@ -4163,6 +4163,6 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
if (first_cpu) {
stats_args.result.schema = result;
stats_args.errp = errp;
- run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
+ query_stats_schema_vcpu(first_cpu, &stats_args);
}
}
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 2417597236..362f22ca06 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -397,6 +397,7 @@ struct CPUState {
struct kvm_dirty_gfn *kvm_dirty_gfns;
uint32_t kvm_fetch_index;
uint64_t dirty_pages;
+ int kvm_vcpu_stats_fd;
/* Use by accel-block: CPU is executing an ioctl() */
QemuLockCnt in_ioctl_lock;
--
2.39.3
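
A side note on the read() -> pread() switch above: once the stats fd is
kept open and shared, plain read() would advance the file offset on every
query, so a second query would no longer start at the stats header.
pread() takes an explicit offset and leaves the fd position untouched. A
minimal generic-POSIX illustration (any readable file stands in for the
stats fd here; this is not QEMU code):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char a[16], b[16];
        int fd = open("/etc/os-release", O_RDONLY); /* stand-in for a stats fd */

        if (fd < 0) {
            return 1;
        }
        /* Both calls return the same leading bytes: the offset is explicit,
         * and the fd's own file position never moves. */
        ssize_t n1 = pread(fd, a, sizeof(a), 0);
        ssize_t n2 = pread(fd, b, sizeof(b), 0);
        printf("first=%zd second=%zd\n", n1, n2);

        /* Two plain read() calls here would instead return consecutive
         * chunks, because read() advances the shared offset. */
        close(fd);
        return 0;
    }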

kvm-vdpa-stop-all-svq-on-device-deletion.patch
@@ -0,0 +1,84 @@
From 757767330abb2c0a650c387a9a5965fee224beee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Thu, 9 Feb 2023 18:00:04 +0100
Subject: [PATCH] vdpa: stop all svq on device deletion
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 286: vdpa: stop all svq on device deletion
RH-Bugzilla: 2213864
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Acked-by: Stefano Garzarella <sgarzare@redhat.com>
RH-Commit: [1/1] b9ff402e4c6e386be3ea867df9358cdaa283cda7
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2213864
Upstream-Status: merged

Not stopping them leaves the device in a bad state when the virtio-net
frontend device is unplugged with the device_del monitor command.

This is not triggerable in a regular poweroff or a forced QEMU shutdown,
because cleanup is called right after vhost_vdpa_dev_start(false). But
device hot-unplug does not call the vdpa device cleanups, which left the
SVQs of every vhost_vdpa device but the last one unstopped.

Fix it and clean up the code, making it symmetric with
vhost_vdpa_svqs_start.

Fixes: dff4426fa656 ("vhost: Add Shadow VirtQueue kick forwarding capabilities")
Reported-by: Lei Yang <leiyang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20230209170004.899472-1-eperezma@redhat.com>
Tested-by: Laurent Vivier <lvivier@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit 2e1a9de96b487cf818a22d681cad8d3f5d18dcca)
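
The resulting stop path mirrors vhost_vdpa_svqs_start: a single loop over
the shadow virtqueue array that stops each queue before unmapping it. A
condensed sketch of the shape after this patch (simplified from the diff
below, not a drop-in replacement for the QEMU function):

    static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
    {
        struct vhost_vdpa *v = dev->opaque;

        if (!v->shadow_vqs_enabled) {
            return;
        }

        for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
            VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

            vhost_svq_stop(svq);                  /* stop first ... */
            vhost_vdpa_svq_unmap_rings(dev, svq); /* ... then unmap */
            event_notifier_cleanup(&svq->hdev_kick);
        }
    }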
---
hw/virtio/vhost-vdpa.c | 17 ++---------------
1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 72ff06673c..46896b7592 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -669,26 +669,11 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
return ret;
}
-static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
-{
- if (!v->shadow_vqs_enabled) {
- return;
- }
-
- for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
- VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
- vhost_svq_stop(svq);
- }
-}
-
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
- struct vhost_vdpa *v = dev->opaque;
int ret;
uint8_t status = 0;
- vhost_vdpa_reset_svq(v);
-
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev, status);
return ret;
@@ -1080,6 +1065,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+
+ vhost_svq_stop(svq);
vhost_vdpa_svq_unmap_rings(dev, svq);
event_notifier_cleanup(&svq->hdev_kick);
--
2.39.3

qemu-kvm.spec
@@ -148,7 +148,7 @@ Obsoletes: %{name}-block-ssh <= %{epoch}:%{version} \
 Summary: QEMU is a machine emulator and virtualizer
 Name: qemu-kvm
 Version: 7.2.0
-Release: 14%{?rcrel}%{?dist}%{?cc_suffix}.1
+Release: 14%{?rcrel}%{?dist}%{?cc_suffix}.3
 # Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
 # Epoch 15 used for RHEL 8
 # Epoch 17 used for RHEL 9 (due to release versioning offset in RHEL 8.5)
@@ -418,6 +418,10 @@ Patch134: kvm-target-i386-Fix-BZHI-instruction.patch
 Patch135: kvm-intel-iommu-fail-DEVIOTLB_UNMAP-without-dt-mode.patch
 # For bz#2203745 - Disk detach is unsuccessful while the guest is still booting [rhel-9.2.0.z]
 Patch136: kvm-acpi-pcihp-allow-repeating-hot-unplug-requests.patch
+# For bz#2213864 - [mlx vhost_vdpa][rhel 9.2]qemu core dump when hot unplug then hotplug a vdpa interface with multi-queue setting [rhel-9.2.0.z]
+Patch137: kvm-vdpa-stop-all-svq-on-device-deletion.patch
+# For bz#2221219 - query-stats QMP command interrupts vcpus, the Max Latencies could be more than 100us (rhel 9.3.0 clone) [rhel-9.2.0.z]
+Patch138: kvm-kvm-reuse-per-vcpu-stats-fd-to-avoid-vcpu-interrupti.patch

 %if %{have_clang}
 BuildRequires: clang
@@ -1448,6 +1452,16 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \
 %endif

 %changelog
+* Tue Jul 11 2023 Miroslav Rezanina <mrezanin@redhat.com> - 7.2.0-14.el9_2.3
+- kvm-kvm-reuse-per-vcpu-stats-fd-to-avoid-vcpu-interrupti.patch [bz#2221219]
+- Resolves: bz#2221219
+  (query-stats QMP command interrupts vcpus, the Max Latencies could be more than 100us (rhel 9.3.0 clone) [rhel-9.2.0.z])
+
+* Mon Jun 19 2023 Miroslav Rezanina <mrezanin@redhat.com> - 7.2.0-14.el9_2.2
+- kvm-vdpa-stop-all-svq-on-device-deletion.patch [bz#2213864]
+- Resolves: bz#2213864
+  ([mlx vhost_vdpa][rhel 9.2]qemu core dump when hot unplug then hotplug a vdpa interface with multi-queue setting [rhel-9.2.0.z])
+
 * Thu May 25 2023 Miroslav Rezanina <mrezanin@redhat.com> - 7.2.0-14.el9_2.1
 - kvm-acpi-pcihp-allow-repeating-hot-unplug-requests.patch [bz#2203745]
 - Resolves: bz#2203745
