author     Andreas Wellving <andreas.wellving@enea.com>    2019-07-10 11:13:59 +0200
committer  Adrian Stratulat <adrian.stratulat@enea.com>    2019-07-12 14:21:28 +0200
commit     726a4b413d426f2209264501fe0f56c88588988f (patch)
tree       64be7a21cb57cf6ba34e61a55e959ec9ebb66835
parent     86b3d79e95edbf9d95f39717ec4d03503c34fe4d (diff)
download   enea-kernel-cache-726a4b413d426f2209264501fe0f56c88588988f.tar.gz
tcp: CVE-2019-11477
tcp: limit payload size of sacked skbs

References:
https://nvd.nist.gov/vuln/detail/CVE-2019-11477
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=cc1b58ccb78e0de51bcec1f2914d9296260668bd

Change-Id: Ic95aaf292571c662f0772467277450c59dc8f8b3
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
-rw-r--r--   patches/cve/4.9.x.scc                                                    |   3
-rw-r--r--   patches/cve/CVE-2019-11477-tcp-limit-payload-size-of-sacked-skbs.patch   | 187
2 files changed, 190 insertions, 0 deletions
diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index 76d3db0..e3a9067 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -43,3 +43,6 @@ patch CVE-2019-11815-net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch
 
 #CVEs fixed in 4.9.175:
 patch CVE-2018-20836-scsi-libsas-fix-a-race-condition-when-smp-task-timeo.patch
+
+#CVEs fixed in 4.9.182:
+patch CVE-2019-11477-tcp-limit-payload-size-of-sacked-skbs.patch
diff --git a/patches/cve/CVE-2019-11477-tcp-limit-payload-size-of-sacked-skbs.patch b/patches/cve/CVE-2019-11477-tcp-limit-payload-size-of-sacked-skbs.patch
new file mode 100644
index 0000000..cff87d7
--- /dev/null
+++ b/patches/cve/CVE-2019-11477-tcp-limit-payload-size-of-sacked-skbs.patch
@@ -0,0 +1,187 @@
From cc1b58ccb78e0de51bcec1f2914d9296260668bd Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Sat, 15 Jun 2019 17:31:03 -0700
Subject: [PATCH] tcp: limit payload size of sacked skbs

commit 3b4929f65b0d8249f19a50245cd88ed1a2f78cff upstream.

Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :

	BUG_ON(tcp_skb_pcount(skb) < pcount);

This can happen if the remote peer has advertised the smallest
MSS that Linux TCP accepts: 48

An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.

This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.

Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.

CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs

Backport notes, provided by Joao Martins <joao.m.martins@oracle.com>

Since v4.15, commit 737ff314563 ("tcp: use sequence distance to
detect reordering") has switched from packet-based FACK tracking
to sequence-based tracking.

v4.14 and older still have the old logic, hence tcp_shift_skb_data()
needs to retain its original logic and keep @fack_count in sync.
In other words, we keep the increment of pcount with
tcp_skb_pcount(skb) and later use that to update fack_count. To make
it more explicit we track the pcount of the new skb in @next_pcount,
which also lets us avoid the repeated invocation of
tcp_skb_pcount(skb) altogether.

CVE: CVE-2019-11477
Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=cc1b58ccb78e0de51bcec1f2914d9296260668bd]

Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/tcp.h   |  3 +++
 include/net/tcp.h     |  2 ++
 net/ipv4/tcp.c        |  1 +
 net/ipv4/tcp_input.c  | 28 ++++++++++++++++++++++------
 net/ipv4/tcp_output.c |  4 ++--
 5 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d0c3615f9050..7f517458c64f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -433,4 +433,7 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 	tp->saved_syn = NULL;
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+		  int shiftlen);
+
 #endif /* _LINUX_TCP_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index fed2a78fb8cb..d7047de952f0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -53,6 +53,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS		48
+#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
 
 /*
  * Never offer a window over 32767 without using window scaling. Some
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2ededb32b754..ee2822a411f9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3307,6 +3307,7 @@ void __init tcp_init(void)
 	unsigned long limit;
 	unsigned int i;
 
+	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
 		     FIELD_SIZEOF(struct sk_buff, cb));
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 80a0cdbabd40..e2e58bc42ba4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1320,7 +1320,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	TCP_SKB_CB(skb)->seq += shifted;
 
 	tcp_skb_pcount_add(prev, pcount);
-	BUG_ON(tcp_skb_pcount(skb) < pcount);
+	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
 	tcp_skb_pcount_add(skb, -pcount);
 
 	/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1387,6 +1387,21 @@ static int skb_can_shift(const struct sk_buff *skb)
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+		  int pcount, int shiftlen)
+{
+	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+	 * to make sure not storing more than 65535 * 8 bytes per skb,
+	 * even if current MSS is bigger.
+	 */
+	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+		return 0;
+	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+		return 0;
+	return skb_shift(to, from, shiftlen);
+}
+
 /* Try collapsing SACK blocks spanning across multiple skbs to a single
  * skb.
  */
@@ -1398,6 +1413,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
 	int mss;
+	int next_pcount;
 	int pcount = 0;
 	int len;
 	int in_sack;
@@ -1495,7 +1511,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
 		goto fallback;
 
-	if (!skb_shift(prev, skb, len))
+	if (!tcp_skb_shift(prev, skb, pcount, len))
 		goto fallback;
 	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
@@ -1514,11 +1530,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		goto out;
 
 	len = skb->len;
-	if (skb_shift(prev, skb, len)) {
-		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+	next_pcount = tcp_skb_pcount(skb);
+	if (tcp_skb_shift(prev, skb, next_pcount, len)) {
+		pcount += next_pcount;
+		tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
 	}
-
 out:
 	state->fack_count += pcount;
 	return prev;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6f35cdd5f2f0..2f166662682e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1355,8 +1355,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	mss_now -= icsk->icsk_ext_hdr_len;
 
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	if (mss_now < 48)
-		mss_now = 48;
+	if (mss_now < TCP_MIN_SND_MSS)
+		mss_now = TCP_MIN_SND_MSS;
 	return mss_now;
 }
 
--
2.20.1
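The upstream commit message quoted above gives the numbers behind the overflow. As a quick illustration only, the following stand-alone C sketch (hypothetical, not part of the patch or of this kernel-cache; the file name and constants merely mirror the values the patch adds to include/net/tcp.h) reproduces the arithmetic:

/*
 * sackcheck.c - illustrative only, assuming the constants introduced by
 * the patch above (TCP_MIN_SND_MSS = 48, MAX_TCP_OPTION_SPACE = 40).
 * Build with e.g.: gcc -o sackcheck sackcheck.c
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE	40	/* bytes reserved for TCP options */
#define TCP_MIN_SND_MSS		48	/* smallest MSS Linux will use */
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)	/* 8 bytes */

int main(void)
{
	/* Worst case on x86: one skb holding 17 fragments of 32KB each. */
	uint64_t max_skb_payload = 17ULL * 32 * 1024;		/* 557056 bytes */
	uint64_t segs = max_skb_payload / TCP_MIN_GSO_SIZE;	/* 69632 segments */

	printf("segments at 8-byte gso_size : %llu\n",
	       (unsigned long long)segs);
	printf("u16 tcp_gso_segs maximum    : %u\n", 65535u);
	printf("cap used by tcp_skb_shift() : %u bytes\n",
	       65535u * TCP_MIN_GSO_SIZE);			/* 524280 bytes */
	return 0;
}

Since 69632 exceeds the 65535 segments that the u16 tcp_gso_segs field can hold, coalescing SACKed skbs up to full fragment capacity overflows the counter and trips the BUG_ON() in tcp_shifted_skb(). The new tcp_skb_shift() helper therefore rejects any shift that would push an skb past 65535 * TCP_MIN_GSO_SIZE bytes or 65535 segments, and __tcp_mtu_to_mss() now clamps to the named TCP_MIN_SND_MSS constant instead of the bare value 48.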