 meta/recipes-devtools/qemu/qemu.inc                  |  1 +
 meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch | 94 ++++++++++++++++++++
 2 files changed, 95 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 2669ba4ec8..e6b26aba88 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -141,6 +141,7 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
            file://CVE-2023-0330_2.patch \
            file://CVE-2023-3354.patch \
            file://CVE-2023-3180.patch \
+           file://CVE-2020-24165.patch \
            "
 UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
 
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
new file mode 100644
index 0000000000..e0a27331a8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
@@ -0,0 +1,94 @@
+CVE: CVE-2020-24165
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/886cc68943ebe8cf7e5f970be33459f95068a441]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 886cc68943ebe8cf7e5f970be33459f95068a441 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alex=20Benn=C3=A9e?= <alex.bennee@linaro.org>
+Date: Fri, 14 Feb 2020 14:49:52 +0000
+Subject: [PATCH] accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
+which is invalidated by a tb_flush before we execute it. This doesn't
+affect the other cpu_exec modes as a tb_flush by it's nature can only
+occur on a quiescent system. The race was described as:
+
+  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+  B3. tcg_tb_alloc obtains a new TB
+
+      C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
+          (same TB as B2)
+
+          A3. start_exclusive critical section entered
+          A4. do_tb_flush is called, TB memory freed/re-allocated
+          A5. end_exclusive exits critical section
+
+  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+  B3. tcg_tb_alloc reallocates TB from B2
+
+      C4. start_exclusive critical section entered
+      C5. cpu_tb_exec executes the TB code that was free in A4
+
+The simplest fix is to widen the exclusive period to include the TB
+lookup. As a result we can drop the complication of checking we are in
+the exclusive region before we end it.
+
+Cc: Yifan <me@yifanlu.com>
+Buglink: https://bugs.launchpad.net/qemu/+bug/1863025
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
+Message-Id: <20200214144952.15502-1-alex.bennee@linaro.org>
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ accel/tcg/cpu-exec.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
+index 2560c90eec79..d95c4848a47b 100644
+--- a/accel/tcg/cpu-exec.c
++++ b/accel/tcg/cpu-exec.c
+@@ -240,6 +240,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
+     uint32_t cf_mask = cflags & CF_HASH_MASK;
+ 
+     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
++        start_exclusive();
++
+         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+         if (tb == NULL) {
+             mmap_lock();
+@@ -247,8 +249,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
+             mmap_unlock();
+         }
+ 
+-        start_exclusive();
+-
+         /* Since we got here, we know that parallel_cpus must be true. */
+         parallel_cpus = false;
+         cc->cpu_exec_enter(cpu);
+@@ -271,14 +271,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
+         qemu_plugin_disable_mem_helpers(cpu);
+     }
+ 
+-    if (cpu_in_exclusive_context(cpu)) {
+-        /* We might longjump out of either the codegen or the
+-         * execution, so must make sure we only end the exclusive
+-         * region if we started it.
+-         */
+-        parallel_cpus = true;
+-        end_exclusive();
+-    }
++
++    /*
++     * As we start the exclusive region before codegen we must still
++     * be in the region if we longjump out of either the codegen or
++     * the execution.
++     */
++    g_assert(cpu_in_exclusive_context(cpu));
++    parallel_cpus = true;
++    end_exclusive();
+ }
+ 
+ struct tb_desc {