author     Richard Purdie <richard.purdie@linuxfoundation.org>  2023-09-06 17:49:07 +0100
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2023-09-07 07:53:50 +0100
commit     fad769ae8b3b520ea7e881ac7dca6080cbe7c2e0 (patch)
tree       a2cf0e2ba8f78cc60fa8504627c6ff19ac1f03ca /meta/recipes-devtools/qemu
parent     f694084dbc48b49c9d9ebdc0f5b6d13b29176708 (diff)
download   poky-fad769ae8b3b520ea7e881ac7dca6080cbe7c2e0.tar.gz
qemu: Add patches to resolve x86 and then mips boot issues
qemu 8.1.0 doesn't boot on x86. After adding tcg fixes for that, mips boot
breaks, so add patches for that as well.

(From OE-Core rev: 3d3fa94ee6d7ea58e3ec64d28bd6414437806cfd)

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/recipes-devtools/qemu')
-rw-r--r--  meta/recipes-devtools/qemu/qemu.inc                                                              4
-rw-r--r--  meta/recipes-devtools/qemu/qemu/0001-softmmu-Assert-data-in-bounds-in-iotlb_to_section.patch    42
-rw-r--r--  meta/recipes-devtools/qemu/qemu/0001-softmmu-Use-async_run_on_cpu-in-tcg_commit.patch          157
-rw-r--r--  meta/recipes-devtools/qemu/qemu/fixmips.patch                                                   18
4 files changed, 221 insertions, 0 deletions
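
For context, the qemu.inc hunk below wires the three new patch files into the build simply by listing them in SRC_URI; the files live next to the recipe, so the fetcher picks them up automatically. As a minimal, hypothetical sketch of carrying the same kind of backport from a separate layer (layer name and paths are illustrative, not part of this commit), a bbappend would be:

    # meta-example/recipes-devtools/qemu/qemu_%.bbappend -- illustrative sketch only,
    # assumes fixmips.patch is shipped in a files/ directory next to this bbappend
    FILESEXTRAPATHS:prepend := "${THISDIR}/files:"
    SRC_URI += "file://fixmips.patch"
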
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 131162dd62..15b963d448 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -29,12 +29,16 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
            file://0009-Define-MAP_SYNC-and-MAP_SHARED_VALIDATE-on-needed-li.patch \
            file://0010-hw-pvrdma-Protect-against-buggy-or-malicious-guest-d.patch \
            file://0002-linux-user-Replace-use-of-lfs64-related-functions-an.patch \
+           file://0001-softmmu-Assert-data-in-bounds-in-iotlb_to_section.patch \
+           file://0001-softmmu-Use-async_run_on_cpu-in-tcg_commit.patch \
            file://fixedmeson.patch \
+           file://fixmips.patch \
            file://qemu-guest-agent.init \
            file://qemu-guest-agent.udev \
            "
 UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
 
+
 SRC_URI[sha256sum] = "710c101198e334d4762eef65f649bc43fa8a5dd75303554b8acfec3eb25f0e55"
 
 SRC_URI:append:class-target = " file://cross.patch"
diff --git a/meta/recipes-devtools/qemu/qemu/0001-softmmu-Assert-data-in-bounds-in-iotlb_to_section.patch b/meta/recipes-devtools/qemu/qemu/0001-softmmu-Assert-data-in-bounds-in-iotlb_to_section.patch
new file mode 100644
index 0000000000..7380e16ab3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-softmmu-Assert-data-in-bounds-in-iotlb_to_section.patch
@@ -0,0 +1,42 @@
From 86e4f93d827d3c1efd00cd8a906e38a2c0f2b5bc Mon Sep 17 00:00:00 2001
From: Richard Henderson <richard.henderson@linaro.org>
Date: Fri, 25 Aug 2023 14:06:58 -0700
Subject: [PATCH] softmmu: Assert data in bounds in iotlb_to_section
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Acked-by: Alex Bennée <alex.bennee@linaro.org>
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/86e4f93d827d3c1efd00cd8a906e38a2c0f2b5bc]
---
 softmmu/physmem.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 3df73542e1..7597dc1c39 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -2413,9 +2413,15 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
     AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
-    MemoryRegionSection *sections = d->map.sections;
+    int section_index = index & ~TARGET_PAGE_MASK;
+    MemoryRegionSection *ret;
+
+    assert(section_index < d->map.sections_nb);
+    ret = d->map.sections + section_index;
+    assert(ret->mr);
+    assert(ret->mr->ops);
 
-    return &sections[index & ~TARGET_PAGE_MASK];
+    return ret;
 }
 
 static void io_mem_init(void)
--
2.34.1

diff --git a/meta/recipes-devtools/qemu/qemu/0001-softmmu-Use-async_run_on_cpu-in-tcg_commit.patch b/meta/recipes-devtools/qemu/qemu/0001-softmmu-Use-async_run_on_cpu-in-tcg_commit.patch
new file mode 100644
index 0000000000..8289b45991
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/0001-softmmu-Use-async_run_on_cpu-in-tcg_commit.patch
@@ -0,0 +1,157 @@
From 0d58c660689f6da1e3feff8a997014003d928b3b Mon Sep 17 00:00:00 2001
From: Richard Henderson <richard.henderson@linaro.org>
Date: Fri, 25 Aug 2023 16:13:17 -0700
Subject: [PATCH] softmmu: Use async_run_on_cpu in tcg_commit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After system startup, run the update to memory_dispatch
and the tlb_flush on the cpu. This eliminates a race,
wherein a running cpu sees the memory_dispatch change
but has not yet seen the tlb_flush.

Since the update now happens on the cpu, we need not use
qatomic_rcu_read to protect the read of memory_dispatch.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1826
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1834
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1846
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Upstream-Status: Backport [0d58c660689f6da1e3feff8a997014003d928b3b]
---
 accel/tcg/cpu-exec-common.c | 30 ----------------------------
 include/exec/cpu-common.h   |  1 -
 softmmu/physmem.c           | 40 +++++++++++++++++++++++++++----------
 3 files changed, 29 insertions(+), 42 deletions(-)

Index: qemu-8.1.0/accel/tcg/cpu-exec-common.c
===================================================================
--- qemu-8.1.0.orig/accel/tcg/cpu-exec-common.c
+++ qemu-8.1.0/accel/tcg/cpu-exec-common.c
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }
 
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec. */
Index: qemu-8.1.0/include/exec/cpu-common.h
===================================================================
--- qemu-8.1.0.orig/include/exec/cpu-common.h
+++ qemu-8.1.0/include/exec/cpu-common.h
@@ -133,7 +133,6 @@ static inline void cpu_physical_memory_w
 {
     cpu_physical_memory_rw(addr, (void *)buf, len, true);
 }
-void cpu_reloading_memory_map(void);
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
                               bool is_write);
Index: qemu-8.1.0/softmmu/physmem.c
===================================================================
--- qemu-8.1.0.orig/softmmu/physmem.c
+++ qemu-8.1.0/softmmu/physmem.c
@@ -680,8 +680,7 @@ address_space_translate_for_iotlb(CPUSta
     IOMMUTLBEntry iotlb;
     int iommu_idx;
     hwaddr addr = orig_addr;
-    AddressSpaceDispatch *d =
-        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
 
     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -2412,7 +2411,7 @@ MemoryRegionSection *iotlb_to_section(CP
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = cpuas->memory_dispatch;
     int section_index = index & ~TARGET_PAGE_MASK;
     MemoryRegionSection *ret;
 
@@ -2487,23 +2486,42 @@ static void tcg_log_global_after_sync(Me
     }
 }
 
+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+    CPUAddressSpace *cpuas = data.host_ptr;
+
+    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+    tlb_flush(cpu);
+}
+
 static void tcg_commit(MemoryListener *listener)
 {
     CPUAddressSpace *cpuas;
-    AddressSpaceDispatch *d;
+    CPUState *cpu;
 
     assert(tcg_enabled());
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
-    cpu_reloading_memory_map();
-    /* The CPU and TLB are protected by the iothread lock.
-     * We reload the dispatch pointer now because cpu_reloading_memory_map()
-     * may have split the RCU critical section.
+    cpu = cpuas->cpu;
+
+    /*
+     * Defer changes to as->memory_dispatch until the cpu is quiescent.
+     * Otherwise we race between (1) other cpu threads and (2) ongoing
+     * i/o for the current cpu thread, with data cached by mmu_lookup().
+     *
+     * In addition, queueing the work function will kick the cpu back to
+     * the main loop, which will end the RCU critical section and reclaim
+     * the memory data structures.
+     *
+     * That said, the listener is also called during realize, before
+     * all of the tcg machinery for run-on is initialized: thus halt_cond.
      */
-    d = address_space_to_dispatch(cpuas->as);
-    qatomic_rcu_set(&cpuas->memory_dispatch, d);
-    tlb_flush(cpuas->cpu);
+    if (cpu->halt_cond) {
+        async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    } else {
+        tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    }
 }
 
 static void memory_map_init(void)
diff --git a/meta/recipes-devtools/qemu/qemu/fixmips.patch b/meta/recipes-devtools/qemu/qemu/fixmips.patch
new file mode 100644
index 0000000000..01546d1030
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/fixmips.patch
@@ -0,0 +1,18 @@
Patch to fix mips boot hangs where virtio appears broken. The patch is under discussion upstream.
The regression was introduced by the other fixes applied to 8.1.0 to get x86 boots working.

Upstream-Status: Pending [https://lore.kernel.org/qemu-devel/6c956b90-5a13-db96-9c02-9834a512fe6f@linaro.org/]

Index: qemu-8.1.0/softmmu/physmem.c
===================================================================
--- qemu-8.1.0.orig/softmmu/physmem.c
+++ qemu-8.1.0/softmmu/physmem.c
@@ -2517,7 +2517,7 @@ static void tcg_commit(MemoryListener *l
      * That said, the listener is also called during realize, before
      * all of the tcg machinery for run-on is initialized: thus halt_cond.
      */
-    if (cpu->halt_cond) {
+    if (cpu->halt_cond && !qemu_cpu_is_self(cpu)) {
         async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
     } else {
         tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));