 recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch | 176 +
 recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch | 109 +
 recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch               |  36 -
 recipes-extended/xen/files/xsa246-4.9.patch                                                |  74 +
 recipes-extended/xen/files/xsa248.patch                                                    | 164 +
 recipes-extended/xen/files/xsa249.patch                                                    |  42 +
 recipes-extended/xen/files/xsa250.patch                                                    |  67 +
 recipes-extended/xen/files/xsa251.patch                                                    |  21 +
 recipes-extended/xen/xen_4.9.0.bb                                                          |  12 -
 recipes-extended/xen/xen_4.9.1.bb                                                          |  18 +
 10 files changed, 671 insertions(+), 48 deletions(-)
diff --git a/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch b/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
new file mode 100644
index 00000000..ad9524a3
--- /dev/null
+++ b/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
@@ -0,0 +1,176 @@
From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:54 +0000
Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
 worked

The PoD zero-check functions speculatively remove memory from the p2m,
then check to see if it's completely zeroed, before putting it in the
cache.

Unfortunately, the p2m_set_entry() calls may fail if the underlying
pagetable structure needs to change and the domain has exhausted its
p2m memory pool: for instance, if we're removing a 2MiB region out of
a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
case); and the return value is not checked.

The underlying mfn will then be added into the PoD cache, and at some
point mapped into another location in the p2m. If the guest
afterwards balloons out this memory, it will be freed to the hypervisor
and potentially reused by another domain, in spite of the fact that
the original domain still has writable mappings to it.

There are several places where p2m_set_entry() shouldn't be able to
fail, as it is guaranteed to write an entry of the same order that
succeeded before. Add a backstop of crashing the domain just in case,
and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
builds.

While we're here, use PAGE_ORDER_2M rather than a magic constant.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v4:
- Removed some trailing whitespace
v3:
- Reformat reset clause to be more compact
- Make sure to set map[i] = NULL when unmapping in case we need to bail
v2:
- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
---
 xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 730a48f928..f2ed751892 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     }

     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
-                  p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+                       p2m_populate_on_demand, p2m->default_access) )
+        goto out;
+
     p2m_tlb_flush_sync(p2m);

     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     ret = SUPERPAGE_PAGES;

 out_reset:
-    if ( reset )
-        p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
-
+    /*
+     * This p2m_set_entry() call shouldn't be able to fail, since the same order
+     * on the same gfn succeeded above. If that turns out to be false, crashing
+     * the domain should be the safest way of making sure we don't leak memory.
+     */
+    if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
+                                type0, p2m->default_access) )
+    {
+        ASSERT_UNREACHABLE();
+        domain_crash(d);
+    }
+
 out:
     gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
     return ret;
@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }

         /* Try to remove the page, restoring old mapping if it fails. */
-        p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+        if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+                           p2m_populate_on_demand, p2m->default_access) )
+            goto skip;

         /* See if the page was successfully unmapped. (Allow one refcount
          * for being allocated to a domain.) */
         if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
         {
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail. If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
+
+        skip:
             unmap_domain_page(map[i]);
             map[i] = NULL;

-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                          types[i], p2m->default_access);
-
             continue;
         }
     }
@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)

         unmap_domain_page(map[i]);

-        /* See comment in p2m_pod_zero_check_superpage() re gnttab
-         * check timing. */
-        if ( j < PAGE_SIZE/sizeof(*map[i]) )
+        map[i] = NULL;
+
+        /*
+         * See comment in p2m_pod_zero_check_superpage() re gnttab
+         * check timing.
+         */
+        if ( j < (PAGE_SIZE / sizeof(*map[i])) )
         {
-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                          types[i], p2m->default_access);
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail. If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
         }
         else
         {
@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             p2m->pod.entry_count++;
         }
     }
-
+
+    return;
+
+out_unmap:
+    /*
+     * Something went wrong, probably crashing the domain. Unmap
+     * everything and return.
+     */
+    for ( i = 0; i < count; i++ )
+        if ( map[i] )
+            unmap_domain_page(map[i]);
 }

 #define POD_SWEEP_LIMIT 1024
--
2.15.0

diff --git a/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch b/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
new file mode 100644
index 00000000..8c850bd7
--- /dev/null
+++ b/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
@@ -0,0 +1,109 @@
From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:55 +0000
Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
 decreasing reservation

If the entire range specified to p2m_pod_decrease_reservation() is marked
populate-on-demand, then it will make a single p2m_set_entry() call,
reducing its PoD entry count.

Unfortunately, in the right circumstances, this p2m_set_entry() call
may fail. In that case, repeated calls to decrease_reservation() may
cause p2m->pod.entry_count to fall below zero, potentially tripping
over BUG_ON()s to the contrary.

Instead, check to see if the entry succeeded, and return false if not.
The caller will then call guest_remove_page() on the gfns, which will
return -EINVAL upon finding no valid memory there to return.

Unfortunately if the order > 0, the entry may have partially changed.
A domain_crash() is probably the safest thing in that case.

Other p2m_set_entry() calls in the same function should be fine,
because they are writing the entry at its current order. Nonetheless,
check the return value and crash if our assumption turns out to be
wrong.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v2: Crash the domain if we're not sure it's safe (or if we think it
can't happen)
---
 xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index f2ed751892..473d6a6dbf 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,

     if ( !nonpod )
     {
-        /* All PoD: Mark the whole region invalid and tell caller
-         * we're done. */
-        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
-                      p2m->default_access);
-        p2m->pod.entry_count-=(1<<order);
+        /*
+         * All PoD: Mark the whole region invalid and tell caller
+         * we're done.
+         */
+        if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+                           p2m->default_access) )
+        {
+            /*
+             * If this fails, we can't tell how much of the range was changed.
+             * Best to crash the domain unless we're sure a partial change is
+             * impossible.
+             */
+            if ( order != 0 )
+                domain_crash(d);
+            goto out_unlock;
+        }
+        p2m->pod.entry_count -= 1UL << order;
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
         goto out_entry_check;
@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
         n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-                          p2m_invalid, p2m->default_access);
+            /* This shouldn't be able to fail */
+            if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+                               p2m_invalid, p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unlock;
+            }
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
             pod -= n;
@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,

             page = mfn_to_page(mfn);

-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-                          p2m_invalid, p2m->default_access);
+            /* This shouldn't be able to fail */
+            if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+                               p2m_invalid, p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unlock;
+            }
             p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
                 set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
--
2.15.0

diff --git a/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch b/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
deleted file mode 100644
index 05016a7a..00000000
--- a/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
+++ /dev/null
@@ -1,36 +0,0 @@
commit 88bfbf90e35f1213f9967a97dee0b2039f9998a4
Author: Bernd Kuhls <bernd.kuhls@t-online.de>
Date:   Sat Aug 19 16:21:42 2017 +0200

    tools/libxc/xc_dom_arm: add missing variable initialization

    The variable domctl.u.address_size.size may remain uninitialized if
    guest_type is not one of xen-3.0-aarch64 or xen-3.0-armv7l. And the
    code precisely checks if this variable is still 0 to decide if the
    guest type is supported or not.

    This fixes the following build failure with gcc 7.x:

    xc_dom_arm.c:229:31: error: 'domctl.u.address_size.size' may be used uninitialized in this function [-Werror=maybe-uninitialized]
    if ( domctl.u.address_size.size == 0 )

    Patch originally taken from
    https://www.mail-archive.com/xen-devel@lists.xen.org/msg109313.html.

    Signed-off-by: Bernd Kuhls <bernd.kuhls@t-online.de>
    Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
    Acked-by: Wei Liu <wei.liu2@citrix.com>

diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
index e7d4bd0..e669fb0 100644
--- a/tools/libxc/xc_dom_arm.c
+++ b/tools/libxc/xc_dom_arm.c
@@ -223,6 +223,8 @@ static int set_mode(xc_interface *xch, domid_t domid, char *guest_type)

     domctl.domain = domid;
     domctl.cmd = XEN_DOMCTL_set_address_size;
+    domctl.u.address_size.size = 0;
+
     for ( i = 0; i < ARRAY_SIZE(types); i++ )
         if ( !strcmp(types[i].guest, guest_type) )
             domctl.u.address_size.size = types[i].size;
diff --git a/recipes-extended/xen/files/xsa246-4.9.patch b/recipes-extended/xen/files/xsa246-4.9.patch
new file mode 100644
index 00000000..6370a106
--- /dev/null
+++ b/recipes-extended/xen/files/xsa246-4.9.patch
@@ -0,0 +1,74 @@
From: Julien Grall <julien.grall@linaro.org>
Subject: x86/pod: prevent infinite loop when shattering large pages

When populating pages, the PoD may need to split large ones using
p2m_set_entry and request the caller to retry (see ept_get_entry for
instance).

p2m_set_entry may fail to shatter if it is not possible to allocate
memory for the new page table. However, the error is not propagated,
resulting in the callers retrying the PoD population indefinitely.

Prevent the infinite loop by returning false when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return 0;
+        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                             p2m_populate_on_demand, p2m->default_access);
     }

     /* Only reclaim if we're in actual need of more cache. */
@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai

     gfn_aligned = (gfn >> order) << order;

-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }

     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1150,13 +1153,18 @@ remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);

-    /* Remap this 2-meg region in singleton chunks */
-    /* NOTE: In a p2m fine-grained lock scenario this might
-     * need promoting the gfn lock from gfn->2M superpage */
+    /*
+     * Remap this 2-meg region in singleton chunks. See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
+     * NOTE: In a p2m fine-grained lock scenario this might
+     * need promoting the gfn lock from gfn->2M superpage.
+     */
     gfn_aligned = (gfn>>order)<<order;
-    for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return -1;
+
     if ( tb_init_done )
     {
         struct {
diff --git a/recipes-extended/xen/files/xsa248.patch b/recipes-extended/xen/files/xsa248.patch
new file mode 100644
index 00000000..966c16e0
--- /dev/null
+++ b/recipes-extended/xen/files/xsa248.patch
@@ -0,0 +1,164 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/mm: don't wrongly set page ownership

PV domains can obtain mappings of any pages owned by the correct domain,
including ones that aren't actually assigned as "normal" RAM, but used
by Xen internally. At the moment such "internal" pages marked as owned
by a guest include pages used to track logdirty bits, as well as p2m
pages and the "unpaged pagetable" for HVM guests. Since the PV memory
management and shadow code conflict in their use of struct page_info
fields, and since shadow code is being used for log-dirty handling for
PV domains, pages coming from the shadow pool must, for PV domains, not
have the domain set as their owner.

While the change could be done conditionally for just the PV case in
shadow code, do it unconditionally (and for consistency also for HAP),
just to be on the safe side.

There's one special case though for shadow code: The page table used for
running a HVM guest in unpaged mode is subject to get_page() (in
set_shadow_status()) and hence must have its owner set.

This is XSA-248.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
---
v2: Drop PGC_page_table related pieces.

--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -286,8 +286,7 @@ static struct page_info *hap_alloc_p2m_p
     {
         d->arch.paging.hap.total_pages--;
         d->arch.paging.hap.p2m_pages++;
-        page_set_owner(pg, d);
-        pg->count_info |= 1;
+        ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
     }
     else if ( !d->arch.paging.p2m_alloc_failed )
     {
@@ -302,21 +301,23 @@ static struct page_info *hap_alloc_p2m_p

 static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
 {
+    struct domain *owner = page_get_owner(pg);
+
     /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);

-    ASSERT(page_get_owner(pg) == d);
-    /* Should have just the one ref we gave it in alloc_p2m_page() */
-    if ( (pg->count_info & PGC_count_mask) != 1 ) {
-        HAP_ERROR("Odd p2m page %p count c=%#lx t=%"PRtype_info"\n",
-                  pg, pg->count_info, pg->u.inuse.type_info);
+    /* Should still have no owner and count zero. */
+    if ( owner || (pg->count_info & PGC_count_mask) )
+    {
+        HAP_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
+                  d->domain_id, mfn_x(page_to_mfn(pg)),
+                  owner ? owner->domain_id : DOMID_INVALID,
+                  pg->count_info, pg->u.inuse.type_info);
         WARN();
+        pg->count_info &= ~PGC_count_mask;
+        page_set_owner(pg, NULL);
     }
-    pg->count_info &= ~PGC_count_mask;
-    /* Free should not decrement domain's total allocation, since
-     * these pages were allocated without an owner. */
-    page_set_owner(pg, NULL);
     d->arch.paging.hap.p2m_pages--;
     d->arch.paging.hap.total_pages++;
     hap_free(d, page_to_mfn(pg));
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1503,32 +1503,29 @@ shadow_alloc_p2m_page(struct domain *d)
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
     d->arch.paging.shadow.p2m_pages++;
     d->arch.paging.shadow.total_pages--;
+    ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));

     paging_unlock(d);

-    /* Unlike shadow pages, mark p2m pages as owned by the domain.
-     * Marking the domain as the owner would normally allow the guest to
-     * create mappings of these pages, but these p2m pages will never be
-     * in the domain's guest-physical address space, and so that is not
-     * believed to be a concern. */
-    page_set_owner(pg, d);
-    pg->count_info |= 1;
     return pg;
 }

 static void
 shadow_free_p2m_page(struct domain *d, struct page_info *pg)
 {
-    ASSERT(page_get_owner(pg) == d);
-    /* Should have just the one ref we gave it in alloc_p2m_page() */
-    if ( (pg->count_info & PGC_count_mask) != 1 )
+    struct domain *owner = page_get_owner(pg);
+
+    /* Should still have no owner and count zero. */
+    if ( owner || (pg->count_info & PGC_count_mask) )
     {
-        SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
+        SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
+                     d->domain_id, mfn_x(page_to_mfn(pg)),
+                     owner ? owner->domain_id : DOMID_INVALID,
                      pg->count_info, pg->u.inuse.type_info);
+        pg->count_info &= ~PGC_count_mask;
+        page_set_owner(pg, NULL);
     }
-    pg->count_info &= ~PGC_count_mask;
     pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
-    page_set_owner(pg, NULL);

     /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
@@ -3132,7 +3129,9 @@ int shadow_enable(struct domain *d, u32
         e = __map_domain_page(pg);
         write_32bit_pse_identmap(e);
         unmap_domain_page(e);
+        pg->count_info = 1;
         pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
+        page_set_owner(pg, d);
     }

     paging_lock(d);
@@ -3170,7 +3169,11 @@ int shadow_enable(struct domain *d, u32
     if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
         p2m_teardown(p2m);
     if ( rv != 0 && pg != NULL )
+    {
+        pg->count_info &= ~PGC_count_mask;
+        page_set_owner(pg, NULL);
         shadow_free_p2m_page(d, pg);
+    }
     domain_unpause(d);
     return rv;
 }
@@ -3279,7 +3282,22 @@ out:

     /* Must be called outside the lock */
     if ( unpaged_pagetable )
+    {
+        if ( page_get_owner(unpaged_pagetable) == d &&
+             (unpaged_pagetable->count_info & PGC_count_mask) == 1 )
+        {
+            unpaged_pagetable->count_info &= ~PGC_count_mask;
+            page_set_owner(unpaged_pagetable, NULL);
+        }
+        /* Complain here in cases where shadow_free_p2m_page() won't. */
+        else if ( !page_get_owner(unpaged_pagetable) &&
+                  !(unpaged_pagetable->count_info & PGC_count_mask) )
+            SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n",
+                         d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)),
+                         unpaged_pagetable->count_info,
+                         unpaged_pagetable->u.inuse.type_info);
         shadow_free_p2m_page(d, unpaged_pagetable);
+    }
 }

 void shadow_final_teardown(struct domain *d)
diff --git a/recipes-extended/xen/files/xsa249.patch b/recipes-extended/xen/files/xsa249.patch
new file mode 100644
index 00000000..ecfa4305
--- /dev/null
+++ b/recipes-extended/xen/files/xsa249.patch
@@ -0,0 +1,42 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/shadow: fix refcount overflow check

Commit c385d27079 ("x86 shadow: for multi-page shadows, explicitly track
the first page") reduced the refcount width to 25, without adjusting the
overflow check. Eliminate the disconnect by using a manifest constant.

Interestingly, up to commit 047782fa01 ("Out-of-sync L1 shadows: OOS
snapshot") the refcount was 27 bits wide, yet the check was already
using 26.

This is XSA-249.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
v2: Simplify expression back to the style it was.

--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -529,7 +529,7 @@ static inline int sh_get_ref(struct doma
     x = sp->u.sh.count;
     nx = x + 1;

-    if ( unlikely(nx >= 1U<<26) )
+    if ( unlikely(nx >= (1U << PAGE_SH_REFCOUNT_WIDTH)) )
     {
         SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
                       __backpointer(sp), mfn_x(smfn));
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -82,7 +82,8 @@ struct page_info
         unsigned long type:5;   /* What kind of shadow is this? */
         unsigned long pinned:1; /* Is the shadow pinned? */
         unsigned long head:1;   /* Is this the first page of the shadow? */
-        unsigned long count:25; /* Reference count */
+#define PAGE_SH_REFCOUNT_WIDTH 25
+        unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
     } sh;

     /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
diff --git a/recipes-extended/xen/files/xsa250.patch b/recipes-extended/xen/files/xsa250.patch
new file mode 100644
index 00000000..26aeb33f
--- /dev/null
+++ b/recipes-extended/xen/files/xsa250.patch
@@ -0,0 +1,67 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/shadow: fix ref-counting error handling

The old-Linux handling in shadow_set_l4e() mistakenly ORed together the
results of sh_get_ref() and sh_pin(). As the latter failing is not a
correctness problem, simply ignore its return value.

In sh_set_toplevel_shadow() a failing sh_get_ref() must not be
accompanied by installing the entry, despite the domain being crashed.

This is XSA-250.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -923,7 +923,7 @@ static int shadow_set_l4e(struct domain
                           shadow_l4e_t new_sl4e,
                           mfn_t sl4mfn)
 {
-    int flags = 0, ok;
+    int flags = 0;
     shadow_l4e_t old_sl4e;
     paddr_t paddr;
     ASSERT(sl4e != NULL);
@@ -938,15 +938,16 @@ static int shadow_set_l4e(struct domain
     {
         /* About to install a new reference */
         mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
-        ok = sh_get_ref(d, sl3mfn, paddr);
-        /* Are we pinning l3 shadows to handle wierd linux behaviour? */
-        if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-            ok |= sh_pin(d, sl3mfn);
-        if ( !ok )
+
+        if ( !sh_get_ref(d, sl3mfn, paddr) )
         {
             domain_crash(d);
             return SHADOW_SET_ERROR;
         }
+
+        /* Are we pinning l3 shadows to handle weird Linux behaviour? */
+        if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
+            sh_pin(d, sl3mfn);
     }

     /* Write the new entry */
@@ -3965,14 +3966,15 @@ sh_set_toplevel_shadow(struct vcpu *v,

     /* Take a ref to this page: it will be released in sh_detach_old_tables()
      * or the next call to set_toplevel_shadow() */
-    if ( !sh_get_ref(d, smfn, 0) )
+    if ( sh_get_ref(d, smfn, 0) )
+        new_entry = pagetable_from_mfn(smfn);
+    else
     {
         SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
         domain_crash(d);
+        new_entry = pagetable_null();
     }

-    new_entry = pagetable_from_mfn(smfn);
-
 install_new_entry:
     /* Done. Install it */
     SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
diff --git a/recipes-extended/xen/files/xsa251.patch b/recipes-extended/xen/files/xsa251.patch
new file mode 100644
index 00000000..582ef622
--- /dev/null
+++ b/recipes-extended/xen/files/xsa251.patch
@@ -0,0 +1,21 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/paging: don't unconditionally BUG() on finding SHARED_M2P_ENTRY

PV guests can fully control the values written into the P2M.

This is XSA-251.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -274,7 +274,7 @@ void paging_mark_pfn_dirty(struct domain
         return;

     /* Shared MFNs should NEVER be marked dirty */
-    BUG_ON(SHARED_M2P(pfn_x(pfn)));
+    BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));

     /*
      * Values with the MSB set denote MFNs that aren't really part of the
diff --git a/recipes-extended/xen/xen_4.9.0.bb b/recipes-extended/xen/xen_4.9.0.bb
deleted file mode 100644
index 8e9c8024..00000000
--- a/recipes-extended/xen/xen_4.9.0.bb
+++ /dev/null
@@ -1,12 +0,0 @@
FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
require xen.inc

SRC_URI = " \
    https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
    file://fix-libxc-xc_dom_arm-missing-initialization.patch \
    "

SRC_URI[md5sum] = "f0a753637630f982dfbdb64121fd71e1"
SRC_URI[sha256sum] = "cade643fe3310d4d6f97d0c215c6fa323bc1130d7e64d7e2043ffaa73a96f33b"

S = "${WORKDIR}/xen-${PV}"
diff --git a/recipes-extended/xen/xen_4.9.1.bb b/recipes-extended/xen/xen_4.9.1.bb
new file mode 100644
index 00000000..5c18bb00
--- /dev/null
+++ b/recipes-extended/xen/xen_4.9.1.bb
@@ -0,0 +1,18 @@
FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
require xen.inc

SRC_URI = " \
    https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
    file://xsa246-4.9.patch \
    file://0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch \
    file://0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch \
    file://xsa248.patch \
    file://xsa249.patch \
    file://xsa250.patch \
    file://xsa251.patch \
    "

SRC_URI[md5sum] = "8b9d6104694b164d54334194135f7217"
SRC_URI[sha256sum] = "ecf88b01f44cd8f4ef208af3f999dceb69bdd2a316d88dd9a9535ea7b49ed356"

S = "${WORKDIR}/xen-${PV}"
