author		Andreas Wellving <andreas.wellving@enea.com>	2018-10-17 14:06:25 +0200
committer	Andreas Wellving <andreas.wellving@enea.com>	2018-10-17 14:06:25 +0200
commit		0368c4076f4017571a5e8a617763859a84277427 (patch)
tree		166c891fd6962dc890c20a00215e4bb3267ae486
parent		bbbaee9150a62c2965710ff2245b65da32f35eb5 (diff)
download	enea-kernel-cache-0368c4076f4017571a5e8a617763859a84277427.tar.gz
net: CVE-2016-7039
net: add recursion limit to GRO

References: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.1.y&id=fabaaaa96d54077b4a9f2c811e55dc09ff2874db

Change-Id: Ice78062187e95abdffb700d3f247e98173886a22
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
-rw-r--r--	patches/cve/4.1.x.scc							2
-rw-r--r--	patches/cve/CVE-2016-7039-net-add-recursion-limit-to-GRO.patch		209
2 files changed, 211 insertions, 0 deletions
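The guard this backport introduces is small enough to model outside the kernel. Below is a minimal, self-contained C sketch of the same pattern, under stated assumptions: the hypothetical struct pkt stands in for the real per-packet NAPI_GRO_CB(skb) control block, and call_receive() plays the role of the patch's call_gro_receive() — bump a per-packet counter on every nested handler invocation and abort (flag the packet for flush) once the limit is hit, instead of recursing without bound.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-packet GRO control block
 * (the real counter lives in struct napi_gro_cb via NAPI_GRO_CB(skb)). */
struct pkt {
	unsigned int recursion_counter;
	unsigned int flush;
};

#define RECURSION_LIMIT 15	/* same value as GRO_RECURSION_LIMIT */

/* Mirrors gro_recursion_inc_test(): true once this packet has been
 * through RECURSION_LIMIT nested receive handlers. */
static int recursion_inc_test(struct pkt *p)
{
	return ++p->recursion_counter == RECURSION_LIMIT;
}

typedef int (*receive_cb)(struct pkt *p);

/* Mirrors call_gro_receive(): refuse to recurse past the limit and
 * mark the packet for normal (non-GRO) processing instead. */
static int call_receive(receive_cb cb, struct pkt *p)
{
	if (recursion_inc_test(p)) {
		p->flush |= 1;
		return -1;	/* GRO aborted for this packet */
	}
	return cb(p);
}

/* A handler that keeps peeling nested headers, the way an
 * attacker-built stack of VLAN headers drives eth_gro_receive(). */
static int nested_receive(struct pkt *p)
{
	return call_receive(nested_receive, p);
}

int main(void)
{
	struct pkt p = { 0, 0 };

	nested_receive(&p);
	/* prints: stopped after 15 guarded calls, flush=1 */
	printf("stopped after %u guarded calls, flush=%u\n",
	       p.recursion_counter, p.flush);
	return 0;
}

Without the counter, nested_receive() here would recurse until the stack ran out — which is exactly the CVE-2016-7039 condition the patch closes for VLAN and TEB.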
diff --git a/patches/cve/4.1.x.scc b/patches/cve/4.1.x.scc
new file mode 100644
index 0000000..613c8d9
--- /dev/null
+++ b/patches/cve/4.1.x.scc
@@ -0,0 +1,2 @@
#fixed in 4.1.37
patch CVE-2016-7039-net-add-recursion-limit-to-GRO.patch
diff --git a/patches/cve/CVE-2016-7039-net-add-recursion-limit-to-GRO.patch b/patches/cve/CVE-2016-7039-net-add-recursion-limit-to-GRO.patch
new file mode 100644
index 0000000..5bfe1df
--- /dev/null
+++ b/patches/cve/CVE-2016-7039-net-add-recursion-limit-to-GRO.patch
@@ -0,0 +1,209 @@
From fabaaaa96d54077b4a9f2c811e55dc09ff2874db Mon Sep 17 00:00:00 2001
From: Sabrina Dubroca <sd@queasysnail.net>
Date: Wed, 14 Dec 2016 13:24:55 +0100
Subject: [PATCH] net: add recursion limit to GRO
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[ Debian: net-add-recursion-limit-to-gro.patch ]

Currently, GRO can do unlimited recursion through the gro_receive
handlers. This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem. Thus, the kernel is vulnerable to a stack overflow, if we
receive a packet composed entirely of VLAN headers.

This patch adds a recursion counter to the GRO layer to prevent stack
overflow. When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally.

Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.

Fixes: CVE-2016-7039
Upstream-Status: Backport

Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Philipp Hahn <hahn@univention.de>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 drivers/net/vxlan.c       |  2 +-
 include/linux/netdevice.h | 24 +++++++++++++++++++++++-
 net/core/dev.c            |  1 +
 net/ethernet/eth.c        |  2 +-
 net/ipv4/af_inet.c        |  2 +-
 net/ipv4/fou.c            |  4 ++--
 net/ipv4/gre_offload.c    |  2 +-
 net/ipv4/udp_offload.c    |  9 +++++++--
 net/ipv6/ip6_offload.c    |  2 +-
 9 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 940f78e..d9e873c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -635,7 +635,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 		}
 	}
 
-	pp = eth_gro_receive(head, skb);
+	pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6c86c7e..ddd47c3a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1957,7 +1957,10 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;
 
-	/* 7 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 3 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -1968,6 +1971,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (gro_recursion_inc_test(skb)) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
diff --git a/net/core/dev.c b/net/core/dev.c
index 185a339..56d820f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4060,6 +4060,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->udp_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f3bad41..76f8389 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -434,7 +434,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0cc98b1..2095cd6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1377,7 +1377,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 4b67937..b22a75c0 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -188,7 +188,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -355,7 +355,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 5a8ee32..53300b8 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -214,7 +214,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f938616..2af7b7e 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -339,8 +339,13 @@ unflush:
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+
+	if (gro_recursion_inc_test(skb)) {
+		pp = NULL;
+	} else {
+		pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+							     uo_priv->offload);
+	}
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 08b6204..db0b842 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
-- 
2.7.4
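One detail worth noting in the netdevice.h hunk above: the new counter is a 4-bit field (u8 recursion_counter:4), which holds exactly 0..15, so GRO_RECURSION_LIMIT = 15 is the largest limit that fits and the ++counter == limit test always trips before the field can wrap. A quick standalone check of that arithmetic, using a hypothetical struct cb in place of napi_gro_cb:

#include <assert.h>
#include <stdio.h>

/* Hypothetical mirror of the 4-bit counter added to struct napi_gro_cb. */
struct cb {
	unsigned char recursion_counter : 4;	/* range 0..15 */
};

int main(void)
{
	struct cb c = { 0 };
	unsigned int ok_calls = 0;

	/* ++counter == 15 is the abort condition; count how many guarded
	 * calls succeed before it trips. */
	while (++c.recursion_counter != 15)
		ok_calls++;

	assert(ok_calls == 14);	/* the 15th guarded call is refused */
	printf("limit trips at counter=%u after %u calls\n",
	       c.recursion_counter, ok_calls);
	return 0;
}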