author     Christopher Clark <christopher.w.clark@gmail.com>  2018-07-31 12:35:47 -0700
committer  Bruce Ashfield <bruce.ashfield@windriver.com>      2018-08-02 22:57:14 -0400
commit     c91d3dca725f703815dbf4fbdd83bdc70ef60686 (patch)
tree       ab0ff0dd97910a87f509fceb27cb5621688e0480
parent     6160783cc05769784781ecac7e04dbd4a15ad8f0 (diff)
download   meta-virtualization-c91d3dca725f703815dbf4fbdd83bdc70ef60686.tar.gz
xen: remove 4.9.1 recipe and patches no longer required
Signed-off-by: Christopher Clark <christopher.clark6@baesystems.com>
Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
-rw-r--r--  recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch | 176
-rw-r--r--  recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch | 109
-rw-r--r--  recipes-extended/xen/files/xsa246-4.9.patch |  74
-rw-r--r--  recipes-extended/xen/files/xsa248.patch | 164
-rw-r--r--  recipes-extended/xen/files/xsa249.patch |  42
-rw-r--r--  recipes-extended/xen/files/xsa250.patch |  67
-rw-r--r--  recipes-extended/xen/files/xsa251.patch |  21
-rw-r--r--  recipes-extended/xen/xen_4.9.1.bb |  18
8 files changed, 0 insertions, 671 deletions
diff --git a/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch b/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
deleted file mode 100644
index ad9524a3..00000000
--- a/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
+++ /dev/null
@@ -1,176 +0,0 @@
From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:54 +0000
Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
 worked

The PoD zero-check functions speculatively remove memory from the p2m,
then check to see if it's completely zeroed, before putting it in the
cache.

Unfortunately, the p2m_set_entry() calls may fail if the underlying
pagetable structure needs to change and the domain has exhausted its
p2m memory pool: for instance, if we're removing a 2MiB region out of
a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
case); and the return value is not checked.

The underlying mfn will then be added into the PoD cache, and at some
point mapped into another location in the p2m. If the guest
afterwards balloons out this memory, it will be freed to the hypervisor
and potentially reused by another domain, in spite of the fact that
the original domain still has writable mappings to it.

There are several places where p2m_set_entry() shouldn't be able to
fail, as it is guaranteed to write an entry of the same order that
succeeded before. Add a backstop of crashing the domain just in case,
and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
builds.

While we're here, use PAGE_ORDER_2M rather than a magic constant.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v4:
- Removed some trailing whitespace
v3:
- Reformat reset clause to be more compact
- Make sure to set map[i] = NULL when unmapping in case we need to bail
v2:
- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
---
 xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 730a48f928..f2ed751892 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
 }

 /* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
- p2m_populate_on_demand, p2m->default_access);
+ if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+ p2m_populate_on_demand, p2m->default_access) )
+ goto out;
+
 p2m_tlb_flush_sync(p2m);

 /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
 ret = SUPERPAGE_PAGES;

 out_reset:
- if ( reset )
- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
-
+ /*
+ * This p2m_set_entry() call shouldn't be able to fail, since the same order
+ * on the same gfn succeeded above. If that turns out to be false, crashing
+ * the domain should be the safest way of making sure we don't leak memory.
+ */
+ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
+ type0, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ }
+
 out:
 gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
 return ret;
@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
 }

 /* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
- p2m_populate_on_demand, p2m->default_access);
+ if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+ p2m_populate_on_demand, p2m->default_access) )
+ goto skip;

 /* See if the page was successfully unmapped. (Allow one refcount
 * for being allocated to a domain.) */
 if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
 {
+ /*
+ * If the previous p2m_set_entry call succeeded, this one shouldn't
+ * be able to fail. If it does, crashing the domain should be safe.
+ */
+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unmap;
+ }
+
+ skip:
 unmap_domain_page(map[i]);
 map[i] = NULL;

- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
- types[i], p2m->default_access);
-
 continue;
 }
 }
@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)

 unmap_domain_page(map[i]);

- /* See comment in p2m_pod_zero_check_superpage() re gnttab
- * check timing. */
- if ( j < PAGE_SIZE/sizeof(*map[i]) )
+ map[i] = NULL;
+
+ /*
+ * See comment in p2m_pod_zero_check_superpage() re gnttab
+ * check timing.
+ */
+ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
 {
- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
- types[i], p2m->default_access);
+ /*
+ * If the previous p2m_set_entry call succeeded, this one shouldn't
+ * be able to fail. If it does, crashing the domain should be safe.
+ */
+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ types[i], p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unmap;
+ }
 }
 else
 {
@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
 p2m->pod.entry_count++;
 }
 }
-
+
+ return;
+
+out_unmap:
+ /*
+ * Something went wrong, probably crashing the domain. Unmap
+ * everything and return.
+ */
+ for ( i = 0; i < count; i++ )
+ if ( map[i] )
+ unmap_domain_page(map[i]);
 }

 #define POD_SWEEP_LIMIT 1024
--
2.15.0

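The pattern this patch applies twice is worth isolating: when a p2m_set_entry() call restores an entry whose identical write just succeeded, failure "cannot happen", yet it is still checked, flagged on debug builds, and answered by crashing the domain rather than leaking a still-mapped page. A minimal standalone sketch of that backstop, using stub types and functions in place of Xen's real interfaces:

#include <assert.h>

/* Stub domain, standing in for Xen's struct domain. */
struct domain { int crashed; };

static void domain_crash(struct domain *d)
{
    d->crashed = 1;                    /* the real hook kills the guest */
}

/* Stand-in for p2m_set_entry(): 0 on success, nonzero on failure. */
static int set_entry(struct domain *d, unsigned long gfn, unsigned int order)
{
    (void)d; (void)gfn; (void)order;
    return 0;
}

/*
 * Restore a mapping that was successfully written at the same gfn and
 * order moments ago. The write "cannot" fail -- but if that assumption
 * is ever wrong, crashing the domain is safer than leaving the guest
 * with a writable mapping of a page that is about to be reused.
 */
static void restore_or_crash(struct domain *d, unsigned long gfn,
                             unsigned int order)
{
    if ( set_entry(d, gfn, order) )
    {
        assert(!"p2m restore failed"); /* ~ ASSERT_UNREACHABLE() on debug */
        domain_crash(d);               /* backstop on release builds */
    }
}

int main(void)
{
    struct domain d = { 0 };
    restore_or_crash(&d, 0x1000, 9);   /* order 9 == PAGE_ORDER_2M */
    return d.crashed;
}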
diff --git a/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch b/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
deleted file mode 100644
index 8c850bd7..00000000
--- a/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
+++ /dev/null
@@ -1,109 +0,0 @@
From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 10 Nov 2017 16:53:55 +0000
Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
 decreasing reservation

If the entire range specified to p2m_pod_decrease_reservation() is marked
populate-on-demand, then it will make a single p2m_set_entry() call,
reducing its PoD entry count.

Unfortunately, in the right circumstances, this p2m_set_entry() call
may fail. In that case, repeated calls to decrease_reservation() may
cause p2m->pod.entry_count to fall below zero, potentially tripping
over BUG_ON()s to the contrary.

Instead, check to see if the entry succeeded, and return false if not.
The caller will then call guest_remove_page() on the gfns, which will
return -EINVAL upon finding no valid memory there to return.

Unfortunately if the order > 0, the entry may have partially changed.
A domain_crash() is probably the safest thing in that case.

Other p2m_set_entry() calls in the same function should be fine,
because they are writing the entry at its current order. Nonetheless,
check the return value and crash if our assumption turns out to be
wrong.

This is part of XSA-247.

Reported-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
v2: Crash the domain if we're not sure it's safe (or if we think it
can't happen)
---
 xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index f2ed751892..473d6a6dbf 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,

 if ( !nonpod )
 {
- /* All PoD: Mark the whole region invalid and tell caller
- * we're done. */
- p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
- p2m->default_access);
- p2m->pod.entry_count-=(1<<order);
+ /*
+ * All PoD: Mark the whole region invalid and tell caller
+ * we're done.
+ */
+ if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+ p2m->default_access) )
+ {
+ /*
+ * If this fails, we can't tell how much of the range was changed.
+ * Best to crash the domain unless we're sure a partial change is
+ * impossible.
+ */
+ if ( order != 0 )
+ domain_crash(d);
+ goto out_unlock;
+ }
+ p2m->pod.entry_count -= 1UL << order;
 BUG_ON(p2m->pod.entry_count < 0);
 ret = 1;
 goto out_entry_check;
@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
 n = 1UL << cur_order;
 if ( t == p2m_populate_on_demand )
 {
- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
- p2m_invalid, p2m->default_access);
+ /* This shouldn't be able to fail */
+ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+ p2m_invalid, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unlock;
+ }
 p2m->pod.entry_count -= n;
 BUG_ON(p2m->pod.entry_count < 0);
 pod -= n;
@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,

 page = mfn_to_page(mfn);

- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
- p2m_invalid, p2m->default_access);
+ /* This shouldn't be able to fail */
+ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+ p2m_invalid, p2m->default_access) )
+ {
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+ goto out_unlock;
+ }
 p2m_tlb_flush_sync(p2m);
 for ( j = 0; j < n; ++j )
 set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
--
2.15.0

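The interesting judgment call above is what to do when the invalidation fails: for order > 0 the range may have been partially modified, so the only safe response is domain_crash(); an order-0 write is all-or-nothing and can simply be reported to the caller. A compact sketch of that decision logic, with hypothetical stubs rather than the real Xen API:

#include <stdbool.h>

struct domain { int crashed; long pod_entries; };

static void domain_crash(struct domain *d) { d->crashed = 1; }

/* Stand-in for p2m_set_entry(): 0 on success, nonzero on failure. */
static int invalidate_range(struct domain *d, unsigned long gfn,
                            unsigned int order)
{
    (void)d; (void)gfn; (void)order;
    return 0;
}

/* Returns true if the whole PoD range was released. */
static bool decrease_reservation(struct domain *d, unsigned long gfn,
                                 unsigned int order)
{
    if ( invalidate_range(d, gfn, order) )
    {
        /*
         * A failure over a multi-page range may have changed part of
         * it; we cannot tell how much, so crash the domain. An
         * order-0 write is all-or-nothing and safe to just report.
         */
        if ( order != 0 )
            domain_crash(d);
        return false;                 /* caller falls back per-page */
    }

    /* Unsigned shift: 1UL << order cannot overflow like (1 << order). */
    d->pod_entries -= 1UL << order;
    return true;
}

int main(void)
{
    struct domain d = { 0, 512 };
    return decrease_reservation(&d, 0x1000, 9) ? 0 : 1;
}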
diff --git a/recipes-extended/xen/files/xsa246-4.9.patch b/recipes-extended/xen/files/xsa246-4.9.patch
deleted file mode 100644
index 6370a106..00000000
--- a/recipes-extended/xen/files/xsa246-4.9.patch
+++ /dev/null
@@ -1,74 +0,0 @@
From: Julien Grall <julien.grall@linaro.org>
Subject: x86/pod: prevent infinite loop when shattering large pages

When populating pages, the PoD may need to split large ones using
p2m_set_entry and request the caller to retry (see ept_get_entry for
instance).

p2m_set_entry may fail to shatter if it is not possible to allocate
memory for the new page table. However, the error is not propagated,
resulting in the callers retrying the PoD operation infinitely.

Prevent the infinite loop by returning false when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
 * NOTE: In a fine-grained p2m locking scenario this operation
 * may need to promote its locking from gfn->1g superpage
 */
- p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
- p2m_populate_on_demand, p2m->default_access);
- return 0;
+ return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+ p2m_populate_on_demand, p2m->default_access);
 }

 /* Only reclaim if we're in actual need of more cache. */
@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai

 gfn_aligned = (gfn >> order) << order;

- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
- p2m->default_access);
+ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+ p2m->default_access) )
+ {
+ p2m_pod_cache_add(p2m, p, order);
+ goto out_fail;
+ }

 for( i = 0; i < (1UL << order); i++ )
 {
@@ -1150,13 +1153,18 @@ remap_and_retry:
 BUG_ON(order != PAGE_ORDER_2M);
 pod_unlock(p2m);

- /* Remap this 2-meg region in singleton chunks */
- /* NOTE: In a p2m fine-grained lock scenario this might
- * need promoting the gfn lock from gfn->2M superpage */
+ /*
+ * Remap this 2-meg region in singleton chunks. See the comment on the
+ * 1G page splitting path above for why a single call suffices.
+ *
+ * NOTE: In a p2m fine-grained lock scenario this might
+ * need promoting the gfn lock from gfn->2M superpage.
+ */
 gfn_aligned = (gfn>>order)<<order;
- for(i=0; i<(1<<order); i++)
- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
- p2m_populate_on_demand, p2m->default_access);
+ if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+ p2m_populate_on_demand, p2m->default_access) )
+ return -1;
+
 if ( tb_init_done )
 {
 struct {
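The root cause here is a swallowed error in a retry protocol: the populate path reported success even when shattering the superpage failed, so callers (e.g. ept_get_entry()'s retry path) spun forever. The shape of the bug and the fix, reduced to a few lines with illustrative names:

/* Stand-in for the shatter: nonzero when the page-table pool is empty. */
static int shatter_superpage(unsigned long gfn)
{
    (void)gfn;
    return -1;                       /* simulate allocation failure */
}

/* Buggy shape: error swallowed, caller told to retry -> infinite loop. */
static int demand_populate_old(unsigned long gfn)
{
    shatter_superpage(gfn);          /* result ignored */
    return 0;                        /* "retry", forever */
}

/* Fixed shape: propagate the failure so the caller can give up. */
static int demand_populate_new(unsigned long gfn)
{
    return shatter_superpage(gfn);
}

int main(void)
{
    (void)demand_populate_old;       /* kept only for contrast */
    /* A retry loop is only safe when the callee can signal failure. */
    return demand_populate_new(0x200000) ? 1 : 0;
}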
diff --git a/recipes-extended/xen/files/xsa248.patch b/recipes-extended/xen/files/xsa248.patch
deleted file mode 100644
index 966c16e0..00000000
--- a/recipes-extended/xen/files/xsa248.patch
+++ /dev/null
@@ -1,164 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/mm: don't wrongly set page ownership

PV domains can obtain mappings of any pages owned by the correct domain,
including ones that aren't actually assigned as "normal" RAM, but used
by Xen internally. At the moment such "internal" pages marked as owned
by a guest include pages used to track logdirty bits, as well as p2m
pages and the "unpaged pagetable" for HVM guests. Since the PV memory
management and shadow code conflict in their use of struct page_info
fields, and since shadow code is being used for log-dirty handling for
PV domains, pages coming from the shadow pool must, for PV domains, not
have the domain set as their owner.

While the change could be done conditionally for just the PV case in
shadow code, do it unconditionally (and for consistency also for HAP),
just to be on the safe side.

There's one special case though for shadow code: The page table used for
running a HVM guest in unpaged mode is subject to get_page() (in
set_shadow_status()) and hence must have its owner set.

This is XSA-248.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
---
v2: Drop PGC_page_table related pieces.

--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -286,8 +286,7 @@ static struct page_info *hap_alloc_p2m_p
 {
 d->arch.paging.hap.total_pages--;
 d->arch.paging.hap.p2m_pages++;
- page_set_owner(pg, d);
- pg->count_info |= 1;
+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
 }
 else if ( !d->arch.paging.p2m_alloc_failed )
 {
@@ -302,21 +301,23 @@ static struct page_info *hap_alloc_p2m_p

 static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
 {
+ struct domain *owner = page_get_owner(pg);
+
 /* This is called both from the p2m code (which never holds the
 * paging lock) and the log-dirty code (which always does). */
 paging_lock_recursive(d);

- ASSERT(page_get_owner(pg) == d);
- /* Should have just the one ref we gave it in alloc_p2m_page() */
- if ( (pg->count_info & PGC_count_mask) != 1 ) {
- HAP_ERROR("Odd p2m page %p count c=%#lx t=%"PRtype_info"\n",
- pg, pg->count_info, pg->u.inuse.type_info);
+ /* Should still have no owner and count zero. */
+ if ( owner || (pg->count_info & PGC_count_mask) )
+ {
+ HAP_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
+ d->domain_id, mfn_x(page_to_mfn(pg)),
+ owner ? owner->domain_id : DOMID_INVALID,
+ pg->count_info, pg->u.inuse.type_info);
 WARN();
+ pg->count_info &= ~PGC_count_mask;
+ page_set_owner(pg, NULL);
 }
- pg->count_info &= ~PGC_count_mask;
- /* Free should not decrement domain's total allocation, since
- * these pages were allocated without an owner. */
- page_set_owner(pg, NULL);
 d->arch.paging.hap.p2m_pages--;
 d->arch.paging.hap.total_pages++;
 hap_free(d, page_to_mfn(pg));
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1503,32 +1503,29 @@ shadow_alloc_p2m_page(struct domain *d)
 pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
 d->arch.paging.shadow.p2m_pages++;
 d->arch.paging.shadow.total_pages--;
+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));

 paging_unlock(d);

- /* Unlike shadow pages, mark p2m pages as owned by the domain.
- * Marking the domain as the owner would normally allow the guest to
- * create mappings of these pages, but these p2m pages will never be
- * in the domain's guest-physical address space, and so that is not
- * believed to be a concern. */
- page_set_owner(pg, d);
- pg->count_info |= 1;
 return pg;
 }

 static void
 shadow_free_p2m_page(struct domain *d, struct page_info *pg)
 {
- ASSERT(page_get_owner(pg) == d);
- /* Should have just the one ref we gave it in alloc_p2m_page() */
- if ( (pg->count_info & PGC_count_mask) != 1 )
+ struct domain *owner = page_get_owner(pg);
+
+ /* Should still have no owner and count zero. */
+ if ( owner || (pg->count_info & PGC_count_mask) )
 {
- SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
+ SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
+ d->domain_id, mfn_x(page_to_mfn(pg)),
+ owner ? owner->domain_id : DOMID_INVALID,
 pg->count_info, pg->u.inuse.type_info);
+ pg->count_info &= ~PGC_count_mask;
+ page_set_owner(pg, NULL);
 }
- pg->count_info &= ~PGC_count_mask;
 pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
- page_set_owner(pg, NULL);

 /* This is called both from the p2m code (which never holds the
 * paging lock) and the log-dirty code (which always does). */
@@ -3132,7 +3129,9 @@ int shadow_enable(struct domain *d, u32
 e = __map_domain_page(pg);
 write_32bit_pse_identmap(e);
 unmap_domain_page(e);
+ pg->count_info = 1;
 pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
+ page_set_owner(pg, d);
 }

 paging_lock(d);
@@ -3170,7 +3169,11 @@ int shadow_enable(struct domain *d, u32
 if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
 p2m_teardown(p2m);
 if ( rv != 0 && pg != NULL )
+ {
+ pg->count_info &= ~PGC_count_mask;
+ page_set_owner(pg, NULL);
 shadow_free_p2m_page(d, pg);
+ }
 domain_unpause(d);
 return rv;
 }
@@ -3279,7 +3282,22 @@ out:

 /* Must be called outside the lock */
 if ( unpaged_pagetable )
+ {
+ if ( page_get_owner(unpaged_pagetable) == d &&
+ (unpaged_pagetable->count_info & PGC_count_mask) == 1 )
+ {
+ unpaged_pagetable->count_info &= ~PGC_count_mask;
+ page_set_owner(unpaged_pagetable, NULL);
+ }
+ /* Complain here in cases where shadow_free_p2m_page() won't. */
+ else if ( !page_get_owner(unpaged_pagetable) &&
+ !(unpaged_pagetable->count_info & PGC_count_mask) )
+ SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n",
+ d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)),
+ unpaged_pagetable->count_info,
+ unpaged_pagetable->u.inuse.type_info);
 shadow_free_p2m_page(d, unpaged_pagetable);
+ }
 }

 void shadow_final_teardown(struct domain *d)
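The invariant the patch converges on: pages from the shadow/HAP pool are handed out with no owner and a zero refcount, and the free path checks (and, if violated, repairs) exactly that, instead of expecting the single guest-visible reference the old code planted. A toy model of the alloc/free contract, with stub structures rather than Xen's struct page_info:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct domain;
struct page_info { struct domain *owner; unsigned long count; };

static struct page_info *pool_take(void)
{
    return calloc(1, sizeof(struct page_info));  /* ownerless, count 0 */
}

static struct page_info *alloc_p2m_page(void)
{
    struct page_info *pg = pool_take();

    /*
     * p2m pages must never look guest-owned: a PV guest may map any
     * page its domain owns, including Xen-internal ones.
     */
    assert(pg && !pg->owner && pg->count == 0);
    return pg;
}

static void free_p2m_page(struct page_info *pg)
{
    if ( pg->owner || pg->count )    /* should still be ownerless */
    {
        fprintf(stderr, "odd p2m page: c=%lx\n", pg->count);
        pg->count = 0;               /* repair before returning to pool */
        pg->owner = NULL;
    }
    free(pg);
}

int main(void)
{
    struct page_info *pg = alloc_p2m_page();
    free_p2m_page(pg);
    return 0;
}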
diff --git a/recipes-extended/xen/files/xsa249.patch b/recipes-extended/xen/files/xsa249.patch
deleted file mode 100644
index ecfa4305..00000000
--- a/recipes-extended/xen/files/xsa249.patch
+++ /dev/null
@@ -1,42 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/shadow: fix refcount overflow check

Commit c385d27079 ("x86 shadow: for multi-page shadows, explicitly track
the first page") reduced the refcount width to 25, without adjusting the
overflow check. Eliminate the disconnect by using a manifest constant.

Interestingly, up to commit 047782fa01 ("Out-of-sync L1 shadows: OOS
snapshot") the refcount was 27 bits wide, yet the check was already
using 26.

This is XSA-249.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
v2: Simplify expression back to the style it was.

--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -529,7 +529,7 @@ static inline int sh_get_ref(struct doma
 x = sp->u.sh.count;
 nx = x + 1;

- if ( unlikely(nx >= 1U<<26) )
+ if ( unlikely(nx >= (1U << PAGE_SH_REFCOUNT_WIDTH)) )
 {
 SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
 __backpointer(sp), mfn_x(smfn));
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -82,7 +82,8 @@ struct page_info
 unsigned long type:5; /* What kind of shadow is this? */
 unsigned long pinned:1; /* Is the shadow pinned? */
 unsigned long head:1; /* Is this the first page of the shadow? */
- unsigned long count:25; /* Reference count */
+#define PAGE_SH_REFCOUNT_WIDTH 25
+ unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
 } sh;

 /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
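XSA-249 reduces to a rule: a bit-field's width and its overflow check must come from one definition. With the manifest constant, narrowing the field can never again silently strand the check at an older width. The same pattern in miniature, with hypothetical names:

#include <stdio.h>

#define SH_REFCOUNT_WIDTH 25   /* single source of truth for the width */

struct shadow {
    unsigned long count : SH_REFCOUNT_WIDTH;   /* reference count */
};

/* Returns 1 and takes a reference, or 0 if the count would overflow. */
static int get_ref(struct shadow *sp)
{
    unsigned long nx = sp->count + 1;

    /* Derived from the same constant as the field declaration, so the
     * check can never disagree with the actual field width again. */
    if ( nx >= (1UL << SH_REFCOUNT_WIDTH) )
        return 0;

    sp->count = nx;
    return 1;
}

int main(void)
{
    struct shadow sh = { (1UL << SH_REFCOUNT_WIDTH) - 1 };  /* saturated */
    printf("ref taken: %d\n", get_ref(&sh));                /* prints 0 */
    return 0;
}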
diff --git a/recipes-extended/xen/files/xsa250.patch b/recipes-extended/xen/files/xsa250.patch
deleted file mode 100644
index 26aeb33f..00000000
--- a/recipes-extended/xen/files/xsa250.patch
+++ /dev/null
@@ -1,67 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/shadow: fix ref-counting error handling

The old-Linux handling in shadow_set_l4e() mistakenly ORed together the
results of sh_get_ref() and sh_pin(). As the latter failing is not a
correctness problem, simply ignore its return value.

In sh_set_toplevel_shadow() a failing sh_get_ref() must not be
accompanied by installing the entry, despite the domain being crashed.

This is XSA-250.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -923,7 +923,7 @@ static int shadow_set_l4e(struct domain
 shadow_l4e_t new_sl4e,
 mfn_t sl4mfn)
 {
- int flags = 0, ok;
+ int flags = 0;
 shadow_l4e_t old_sl4e;
 paddr_t paddr;
 ASSERT(sl4e != NULL);
@@ -938,15 +938,16 @@ static int shadow_set_l4e(struct domain
 {
 /* About to install a new reference */
 mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
- ok = sh_get_ref(d, sl3mfn, paddr);
- /* Are we pinning l3 shadows to handle wierd linux behaviour? */
- if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
- ok |= sh_pin(d, sl3mfn);
- if ( !ok )
+
+ if ( !sh_get_ref(d, sl3mfn, paddr) )
 {
 domain_crash(d);
 return SHADOW_SET_ERROR;
 }
+
+ /* Are we pinning l3 shadows to handle weird Linux behaviour? */
+ if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
+ sh_pin(d, sl3mfn);
 }

 /* Write the new entry */
@@ -3965,14 +3966,15 @@ sh_set_toplevel_shadow(struct vcpu *v,

 /* Take a ref to this page: it will be released in sh_detach_old_tables()
 * or the next call to set_toplevel_shadow() */
- if ( !sh_get_ref(d, smfn, 0) )
+ if ( sh_get_ref(d, smfn, 0) )
+ new_entry = pagetable_from_mfn(smfn);
+ else
 {
 SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
 domain_crash(d);
+ new_entry = pagetable_null();
 }

- new_entry = pagetable_from_mfn(smfn);
-
 install_new_entry:
 /* Done. Install it */
 SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
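The shadow_set_l4e() hunk untangles two calls with very different contracts: sh_get_ref() is correctness-critical, sh_pin() is best-effort. ORing their results meant a failed refcount could hide behind a successful pin. The before/after shape, reduced to stub functions:

/* Stand-ins: nonzero means success. */
static int get_ref(void) { return 0; }   /* simulate refcount failure */
static int pin(void)     { return 1; }   /* pin happens to succeed */

static int install_entry_old(void)
{
    int ok = get_ref();
    ok |= pin();             /* BUG: pin() success masks get_ref() failure */
    return ok ? 0 : -1;      /* reports success despite the missing ref */
}

static int install_entry_new(void)
{
    if ( !get_ref() )        /* the only correctness-critical step */
        return -1;           /* caller crashes the domain */
    (void)pin();             /* best-effort; failure is harmless */
    return 0;
}

int main(void)
{
    /* old: 0 (wrongly "ok"); new: -1 (the refcount failure surfaces). */
    return (install_entry_old() == 0 && install_entry_new() == -1) ? 0 : 1;
}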
diff --git a/recipes-extended/xen/files/xsa251.patch b/recipes-extended/xen/files/xsa251.patch
deleted file mode 100644
index 582ef622..00000000
--- a/recipes-extended/xen/files/xsa251.patch
+++ /dev/null
@@ -1,21 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/paging: don't unconditionally BUG() on finding SHARED_M2P_ENTRY

PV guests can fully control the values written into the P2M.

This is XSA-251.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -274,7 +274,7 @@ void paging_mark_pfn_dirty(struct domain
 return;

 /* Shared MFNs should NEVER be marked dirty */
- BUG_ON(SHARED_M2P(pfn_x(pfn)));
+ BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));

 /*
 * Values with the MSB set denote MFNs that aren't really part of the
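The one-line fix encodes a general hypervisor rule: BUG_ON() is for Xen's own invariants, never for values a guest can write. A PV guest manages its own P2M, so finding SHARED_M2P_ENTRY there is guest noise, not corruption; the assertion is therefore gated on the mode where Xen owns the table. In miniature, with illustrative predicate names:

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins for paging_mode_translate() / SHARED_M2P(). */
static bool xen_owns_p2m(void)                 { return false; } /* PV */
static bool is_shared_entry(unsigned long pfn) { return pfn == ~0UL - 1; }

static void mark_pfn_dirty(unsigned long pfn)
{
    /*
     * Old: assert(!is_shared_entry(pfn)) -- a PV guest could write the
     * sentinel into its own P2M and take the host down.
     * New: only enforce the invariant where Xen controls the table;
     * otherwise the bogus value is simply the guest's problem.
     */
    assert(!(xen_owns_p2m() && is_shared_entry(pfn)));

    /* ... dirty-bitmap update would go here ... */
}

int main(void)
{
    mark_pfn_dirty(~0UL - 1);   /* harmless for a PV guest after the fix */
    return 0;
}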
diff --git a/recipes-extended/xen/xen_4.9.1.bb b/recipes-extended/xen/xen_4.9.1.bb
deleted file mode 100644
index 5c18bb00..00000000
--- a/recipes-extended/xen/xen_4.9.1.bb
+++ /dev/null
@@ -1,18 +0,0 @@
FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
require xen.inc

SRC_URI = " \
 https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
 file://xsa246-4.9.patch \
 file://0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch \
 file://0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch \
 file://xsa248.patch \
 file://xsa249.patch \
 file://xsa250.patch \
 file://xsa251.patch \
 "

SRC_URI[md5sum] = "8b9d6104694b164d54334194135f7217"
SRC_URI[sha256sum] = "ecf88b01f44cd8f4ef208af3f999dceb69bdd2a316d88dd9a9535ea7b49ed356"

S = "${WORKDIR}/xen-${PV}"