author     Gaurav Gupta <gauragup@cisco.com>  2023-03-30 12:51:43 -0700
committer  Steve Sakoman <steve@sakoman.com>  2023-04-19 04:32:59 -1000
commit     a526ef88ee960f6fce8ccae0230d5469309fd03a (patch)
tree       01ec12421a94f42de9b524d6c4f1caf034fd8a84 /meta
parent     0c1e54eee110754633628a36b6564af9acdbf905 (diff)
download   poky-a526ef88ee960f6fce8ccae0230d5469309fd03a.tar.gz
qemu: fix build error introduced by CVE-2021-3929 fix
The patch for CVE-2021-3929 applied on dunfell returns a value for a
void function. This results in the following compiler warning/error:

hw/block/nvme.c:77:6: error: void function 'nvme_addr_read' should not return a value [-Wreturn-type]
      return NVME_DATA_TRAS_ERROR;
      ^      ~~~~~~~~~~~~~~~~~~~~

In newer versions of qemu, the function is changed to have a return
value, but that is not present in the version of qemu used in "dunfell".
Backport some of the patches to correct this.

(From OE-Core rev: 4ad98f0b27615ad59ae61110657cf69004c61ef4)

Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
Signed-off-by: Steve Sakoman <steve@sakoman.com>
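The underlying C issue is small but worth spelling out: NVME_DATA_TRAS_ERROR is being returned from a function whose dunfell-era signature is still void. A minimal, self-contained sketch of the broken and fixed shapes (addr_read_broken, addr_read_fixed and DATA_TRANSFER_ERROR are illustrative stand-ins, not the QEMU identifiers):

    /* Minimal, self-contained illustration of the failure mode; the names
     * below are stand-ins, not the QEMU ones. */
    #include <stdio.h>

    #define DATA_TRANSFER_ERROR 0x4004  /* stand-in for NVME_DATA_TRAS_ERROR */

    #if 0
    /* Shape of the broken dunfell backport: a value returned from a void
     * function, which compilers reject or warn about under -Wreturn-type. */
    static void addr_read_broken(int is_iomem)
    {
        if (is_iomem) {
            return DATA_TRANSFER_ERROR;
        }
    }
    #endif

    /* Shape after the backported patches: the function returns a status
     * code, with 0 meaning success, so callers can propagate DMA errors. */
    static int addr_read_fixed(int is_iomem)
    {
        if (is_iomem) {
            return DATA_TRANSFER_ERROR;
        }
        return 0;
    }

    int main(void)
    {
        printf("status = 0x%x\n", addr_read_fixed(1));
        return 0;
    }

The two backported patches below make the same signature change in hw/block/nvme.c, so the CVE-2021-3929 check can return a status without tripping -Wreturn-type.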
Diffstat (limited to 'meta')
-rw-r--r--  meta/recipes-devtools/qemu/qemu.inc                                           |   2
-rw-r--r--  meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch                           |  33
-rw-r--r--  meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch         | 146
-rw-r--r--  meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch   |  55
4 files changed, 221 insertions(+), 15 deletions(-)
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 5466303c94..3b1bd3b656 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -115,6 +115,8 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
            file://CVE-2021-3638.patch \
            file://CVE-2021-20196.patch \
            file://CVE-2021-3507.patch \
+           file://hw-block-nvme-refactor-nvme_addr_read.patch \
+           file://hw-block-nvme-handle-dma-errors.patch \
            file://CVE-2021-3929.patch \
            file://CVE-2022-4144.patch \
            file://CVE-2020-15859.patch \
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
index 3df2f8886a..a1862f1226 100644
--- a/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
@@ -1,7 +1,8 @@
-From 736b01642d85be832385063f278fe7cd4ffb5221 Mon Sep 17 00:00:00 2001
-From: Klaus Jensen <k.jensen@samsung.com>
-Date: Fri, 17 Dec 2021 10:44:01 +0100
-Subject: [PATCH] hw/nvme: fix CVE-2021-3929
+From 2c682b5975b41495f98cc34b8243042c446eec44 Mon Sep 17 00:00:00 2001
+From: Gaurav Gupta <gauragup@cisco.com>
+Date: Wed, 29 Mar 2023 14:36:16 -0700
+Subject: [PATCH] hw/nvme: fix CVE-2021-3929 MIME-Version: 1.0 Content-Type:
+ text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -17,21 +18,23 @@ Reviewed-by: Keith Busch <kbusch@kernel.org>
 Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
 Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
 
-Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/736b01642d85be832385]
+Upstream-Status: Backport
+[https://gitlab.com/qemu-project/qemu/-/commit/736b01642d85be832385]
 CVE: CVE-2021-3929
 Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
 ---
  hw/block/nvme.c | 23 +++++++++++++++++++++++
  hw/block/nvme.h |  1 +
  2 files changed, 24 insertions(+)
 
 diff --git a/hw/block/nvme.c b/hw/block/nvme.c
-index 12d82542..e7d0750c 100644
+index bda446d..ae9b19f 100644
 --- a/hw/block/nvme.c
 +++ b/hw/block/nvme.c
-@@ -52,8 +52,31 @@
- 
- static void nvme_process_sq(void *opaque);
+@@ -60,8 +60,31 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+     return addr >= low && addr < hi;
+ }
 
 +static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
 +{
@@ -51,18 +54,18 @@ index 12d82542..e7d0750c 100644
 +    return addr >= lo && addr < hi;
 +}
 +
- static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
  {
 +
 +    if (nvme_addr_is_iomem(n, addr)) {
 +        return NVME_DATA_TRAS_ERROR;
 +    }
 +
-     if (n->cmbsz && addr >= n->ctrl_mem.addr &&
-         addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
+     if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
          memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+         return 0;
 diff --git a/hw/block/nvme.h b/hw/block/nvme.h
-index 557194ee..5a2b119c 100644
+index 557194e..5a2b119 100644
 --- a/hw/block/nvme.h
 +++ b/hw/block/nvme.h
 @@ -59,6 +59,7 @@ typedef struct NvmeNamespace {
@@ -74,5 +77,5 @@ index 557194ee..5a2b119c 100644
      MemoryRegion ctrl_mem;
      NvmeBar bar;
 -- 
-2.30.2
+1.8.3.1
 
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
new file mode 100644
index 0000000000..0fdae8351a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
@@ -0,0 +1,146 @@
From ea2a7c7676d8eb9d1458eaa4b717df46782dcb3a Mon Sep 17 00:00:00 2001
From: Gaurav Gupta <gauragup@cisco.com>
Date: Wed, 29 Mar 2023 14:07:17 -0700
Subject: [PATCH 2/2] hw/block/nvme: handle dma errors

Handling DMA errors gracefully is required for the device to pass the
block/011 test ("disable PCI device while doing I/O") in the blktests
suite.

With this patch the device sets the Controller Fatal Status bit in the
CSTS register when failing to read from a submission queue or writing to
a completion queue; expecting the host to reset the controller.

If DMA errors occur at any other point in the execution of the command
(say, while mapping the PRPs), the command is aborted with a Data
Transfer Error status code.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
---
 hw/block/nvme.c       | 41 +++++++++++++++++++++++++++++++----------
 hw/block/trace-events |  3 +++
 2 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e6f24a6..bda446d 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -60,14 +60,14 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
     return addr >= low && addr < hi;
 }
 
-static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
 {
     if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
         memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
-        return;
+        return 0;
     }
 
-    pci_dma_read(&n->parent_obj, addr, buf, size);
+    return pci_dma_read(&n->parent_obj, addr, buf, size);
 }
 
 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
@@ -152,6 +152,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
     trans_len = MIN(len, trans_len);
     int num_prps = (len >> n->page_bits) + 1;
+    int ret;
 
     if (unlikely(!prp1)) {
         trace_nvme_err_invalid_prp();
@@ -178,7 +179,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
 
         nents = (len + n->page_size - 1) >> n->page_bits;
         prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-        nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+        ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+        if (ret) {
+            trace_pci_nvme_err_addr_read(prp2);
+            return NVME_DATA_TRAS_ERROR;
+        }
         while (len != 0) {
             uint64_t prp_ent = le64_to_cpu(prp_list[i]);
 
@@ -191,8 +196,12 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                 i = 0;
                 nents = (len + n->page_size - 1) >> n->page_bits;
                 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-                nvme_addr_read(n, prp_ent, (void *)prp_list,
-                    prp_trans);
+                ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
+                                     prp_trans);
+                if (ret) {
+                    trace_pci_nvme_err_addr_read(prp_ent);
+                    return NVME_DATA_TRAS_ERROR;
+                }
                 prp_ent = le64_to_cpu(prp_list[i]);
             }
 
@@ -286,6 +295,7 @@ static void nvme_post_cqes(void *opaque)
     NvmeCQueue *cq = opaque;
     NvmeCtrl *n = cq->ctrl;
     NvmeRequest *req, *next;
+    int ret;
 
     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
         NvmeSQueue *sq;
@@ -295,15 +305,21 @@ static void nvme_post_cqes(void *opaque)
             break;
         }
 
-        QTAILQ_REMOVE(&cq->req_list, req, entry);
         sq = req->sq;
         req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
         req->cqe.sq_id = cpu_to_le16(sq->sqid);
         req->cqe.sq_head = cpu_to_le16(sq->head);
         addr = cq->dma_addr + cq->tail * n->cqe_size;
+        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+                            sizeof(req->cqe));
+        if (ret) {
+            trace_pci_nvme_err_addr_write(addr);
+            trace_pci_nvme_err_cfs();
+            n->bar.csts = NVME_CSTS_FAILED;
+            break;
+        }
+        QTAILQ_REMOVE(&cq->req_list, req, entry);
         nvme_inc_cq_tail(cq);
-        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
-            sizeof(req->cqe));
         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
     }
     if (cq->tail != cq->head) {
@@ -888,7 +904,12 @@ static void nvme_process_sq(void *opaque)
 
     while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
         addr = sq->dma_addr + sq->head * n->sqe_size;
-        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
+        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
+            trace_pci_nvme_err_addr_read(addr);
+            trace_pci_nvme_err_cfs();
+            n->bar.csts = NVME_CSTS_FAILED;
+            break;
+        }
         nvme_inc_sq_head(sq);
 
         req = QTAILQ_FIRST(&sq->req_list);
diff --git a/hw/block/trace-events b/hw/block/trace-events
index c03e80c..4e4ad4e 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -60,6 +60,9 @@ nvme_mmio_shutdown_set(void) "shutdown bit set"
 nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
 
 # nvme traces for error conditions
+pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_cfs(void) "controller fatal status"
 nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
 nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
 nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
-- 
1.8.3.1

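The control flow this patch introduces follows one pattern: nvme_addr_read() and pci_dma_write() now report failure; command-level callers (PRP mapping) turn a failure into a Data Transfer Error for that command only, while queue-level callers (submission-queue fetch, completion posting) latch the Controller Fatal Status bit and stop processing. A toy, self-contained sketch of that split, with stand-in types and names rather than the QEMU ones:

    /* Toy model of the error-propagation split added by the patch above.
     * All names and types here are illustrative stand-ins, not QEMU APIs. */
    #include <stdbool.h>
    #include <stdio.h>

    #define DATA_TRANSFER_ERROR 0x4004  /* stand-in for NVME_DATA_TRAS_ERROR */
    #define CSTS_FAILED 0x2             /* stand-in for NVME_CSTS_FAILED     */

    struct toy_ctrl { unsigned csts; };

    /* Stand-in for pci_dma_read(): non-zero means the DMA failed,
     * e.g. the PCI device was disabled mid-I/O (blktests block/011). */
    static int dma_read(bool device_gone) { return device_gone ? -1 : 0; }

    /* Command level (PRP mapping): a failed read aborts only this command
     * with a Data Transfer Error status code. */
    static int map_prp(bool device_gone)
    {
        if (dma_read(device_gone)) {
            return DATA_TRANSFER_ERROR;
        }
        return 0;
    }

    /* Queue level (fetching a submission-queue entry): a failed read is
     * fatal, so the controller sets CSTS.CFS and stops, expecting the host
     * to reset it. */
    static void process_sq(struct toy_ctrl *n, bool device_gone)
    {
        if (dma_read(device_gone)) {
            n->csts = CSTS_FAILED;
        }
    }

    int main(void)
    {
        struct toy_ctrl n = { 0 };
        printf("prp status: 0x%x\n", map_prp(true));
        process_sq(&n, true);
        printf("csts: 0x%x\n", n.csts);
        return 0;
    }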
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
new file mode 100644
index 0000000000..66ada52efb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
@@ -0,0 +1,55 @@
From 55428706d5b0b8889b8e009eac77137bb556a4f0 Mon Sep 17 00:00:00 2001
From: Klaus Jensen <k.jensen@samsung.com>
Date: Tue, 9 Jun 2020 21:03:17 +0200
Subject: [PATCH 1/2] hw/block/nvme: refactor nvme_addr_read
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Pull the controller memory buffer check to its own function. The check
will be used on its own in later patches.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200609190333.59390-7-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/block/nvme.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 12d8254..e6f24a6 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -52,14 +52,22 @@
 
 static void nvme_process_sq(void *opaque);
 
+static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+{
+    hwaddr low = n->ctrl_mem.addr;
+    hwaddr hi = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
+
+    return addr >= low && addr < hi;
+}
+
 static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
 {
-    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
-        addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
+    if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
         memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
-    } else {
-        pci_dma_read(&n->parent_obj, addr, buf, size);
+        return;
     }
+
+    pci_dma_read(&n->parent_obj, addr, buf, size);
 }
 
 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
-- 
1.8.3.1

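For completeness, the point of this small refactor is reuse: once the controller-memory-buffer bounds check lives in nvme_addr_is_cmb(), later patches (including the reworked CVE-2021-3929 fix above) can call the same predicate instead of repeating the address arithmetic. A minimal self-contained sketch of the shape, with stand-in names rather than the QEMU code:

    /* Sketch of a range-check helper shared between code paths.
     * Names, sizes and addresses are illustrative stand-ins. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t  cmb_buf[256];
    static uint64_t cmb_base = 0x1000;
    static uint64_t cmb_size = sizeof(cmb_buf);

    /* The extracted predicate: is this address inside the CMB window? */
    static bool addr_is_cmb(uint64_t addr)
    {
        return addr >= cmb_base && addr < cmb_base + cmb_size;
    }

    /* One caller: serve reads from the buffer when the address is in range,
     * otherwise fall back to a normal DMA read (omitted in this sketch). */
    static void addr_read(uint64_t addr, void *buf, size_t len)
    {
        if (addr_is_cmb(addr)) {
            memcpy(buf, &cmb_buf[addr - cmb_base], len);
            return;
        }
    }

    int main(void)
    {
        uint8_t out[4] = { 0 };
        cmb_buf[4] = 0xab;
        addr_read(0x1004, out, sizeof(out));
        printf("0x%02x\n", out[0]);
        return 0;
    }

Because the helper is side-effect free, the follow-up patches can use it both on the read fast path and alongside additional address-class checks without duplicating the bounds logic.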