From 09a07b5b39c87423df9e8f6574c19a14d36beac5 Mon Sep 17 00:00:00 2001
From: Hitendra Prajapati
Date: Wed, 27 Jul 2022 10:34:12 +0530
Subject: [PATCH] CVE-2022-35414

Upstream-Status: Backport [https://github.com/qemu/qemu/commit/418ade7849ce7641c0f7333718caf5091a02fd4c]
CVE: CVE-2022-35414
Signed-off-by: Hitendra Prajapati
---
 exec.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/exec.c b/exec.c
index 43c70ffb..2d6add46 100644
--- a/exec.c
+++ b/exec.c
@@ -685,7 +685,7 @@ static void tcg_iommu_free_notifier_list(CPUState *cpu)
 
 /* Called from RCU critical section */
 MemoryRegionSection *
-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                   hwaddr *xlat, hwaddr *plen,
                                   MemTxAttrs attrs, int *prot)
 {
@@ -694,6 +694,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     IOMMUMemoryRegionClass *imrc;
     IOMMUTLBEntry iotlb;
     int iommu_idx;
+    hwaddr addr = orig_addr;
     AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 
     for (;;) {
@@ -737,6 +738,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     return section;
 
 translate_fail:
+    /*
+     * We should be given a page-aligned address -- certainly
+     * tlb_set_page_with_attrs() does so.  The page offset of xlat
+     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
+     * The page portion of xlat will be logged by memory_region_access_valid()
+     * when this memory access is rejected, so use the original untranslated
+     * physical address.
+     */
+    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
+    *xlat = orig_addr;
     return &d->map.sections[PHYS_SECTION_UNASSIGNED];
 }
 #endif
-- 
2.25.1