From bac630923de2f10b62e8f2635f192b7fec8165ba Mon Sep 17 00:00:00 2001
From: Sona Sarmadi
Date: Tue, 30 Aug 2016 13:52:49 +0200
Subject: kernel-net: CVE-2016-5696 tcp: make challenge acks less predictable

net/ipv4/tcp_input.c in the Linux kernel before 4.7 does not properly
determine the rate of challenge ACK segments, which makes it easier for
man-in-the-middle attackers to hijack TCP sessions via a blind in-window
attack.

References:
https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-5696
https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/patch/?id=5413f1a526d2d51d7a5768133c90936c017165c6
https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/patch/?id=72c2d3bccaba4a0a4de354f9d2d24eccd05bfccf

(This is a follow-up to "tcp: make challenge acks less predictable".)

Signed-off-by: Sona Sarmadi
Signed-off-by: Martin Borg
---
 .../CVE-2016-5696-limiting-of-all-challenge.patch | 109 +++++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 recipes-kernel/linux/files/CVE-2016-5696-limiting-of-all-challenge.patch

diff --git a/recipes-kernel/linux/files/CVE-2016-5696-limiting-of-all-challenge.patch b/recipes-kernel/linux/files/CVE-2016-5696-limiting-of-all-challenge.patch
new file mode 100644
index 0000000..f2c2364
--- /dev/null
+++ b/recipes-kernel/linux/files/CVE-2016-5696-limiting-of-all-challenge.patch
@@ -0,0 +1,109 @@
+From 5413f1a526d2d51d7a5768133c90936c017165c6 Mon Sep 17 00:00:00 2001
+From: Jason Baron
+Date: Thu, 14 Jul 2016 11:38:40 -0400
+Subject: [PATCH] tcp: enable per-socket rate limiting of all 'challenge acks'
+
+[ Upstream commit 083ae308280d13d187512b9babe3454342a7987e ]
+
+The per-socket rate limit for 'challenge acks' was introduced in the
+context of limiting ack loops:
+
+commit f2b2c582e824 ("tcp: mitigate ACK loops for connections as tcp_sock")
+
+And I think it can be extended to rate limit all 'challenge acks' on a
+per-socket basis.
+
+Since we have the global tcp_challenge_ack_limit, this patch allows for
+tcp_challenge_ack_limit to be set to a large value and effectively rely on
+the per-socket limit, or set tcp_challenge_ack_limit to a lower value and
+still prevent a single connection from consuming the entire challenge ack
+quota.
+
+It further moves in the direction of eliminating the global limit at some
+point, as Eric Dumazet has suggested. This is a follow-up to:
+Subject: tcp: make challenge acks less predictable
+
+CVE: CVE-2016-5696
+Upstream-Status: Backport
+
+Cc: Eric Dumazet
+Cc: David S. Miller
+Cc: Neal Cardwell
+Cc: Yuchung Cheng
+Cc: Yue Cao
+Signed-off-by: Jason Baron
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sona Sarmadi
+---
+ net/ipv4/tcp_input.c | 39 ++++++++++++++++++++++-----------------
+ 1 file changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 05f10df..12b98e2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3390,6 +3390,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
+ 	return flag;
+ }
+
++static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
++				   u32 *last_oow_ack_time)
++{
++	if (*last_oow_ack_time) {
++		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
++
++		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
++			NET_INC_STATS_BH(net, mib_idx);
++			return true;	/* rate-limited: don't send yet! */
++		}
++	}
++
++	*last_oow_ack_time = tcp_time_stamp;
++
++	return false;	/* not rate-limited: go ahead, send dupack now! */
++}
++
+ /* Return true if we're currently rate-limiting out-of-window ACKs and
+  * thus shouldn't send a dupack right now. We rate-limit dupacks in
+  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+@@ -3403,21 +3420,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ 	/* Data packets without SYNs are not likely part of an ACK loop. */
+ 	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+ 	    !tcp_hdr(skb)->syn)
+-		goto not_rate_limited;
+-
+-	if (*last_oow_ack_time) {
+-		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+-
+-		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+-			NET_INC_STATS_BH(net, mib_idx);
+-			return true;	/* rate-limited: don't send yet! */
+-		}
+-	}
+-
+-	*last_oow_ack_time = tcp_time_stamp;
++		return false;
+
+-not_rate_limited:
+-	return false;	/* not rate-limited: go ahead, send dupack now! */
++	return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
+ }
+
+ /* RFC 5961 7 [ACK Throttling] */
+@@ -3430,9 +3435,9 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
+ 	u32 count, now;
+
+ 	/* First check our per-socket dupack rate limit. */
+-	if (tcp_oow_rate_limited(sock_net(sk), skb,
+-				 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+-				 &tp->last_oow_ack_time))
++	if (__tcp_oow_rate_limited(sock_net(sk),
++				   LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
++				   &tp->last_oow_ack_time))
+ 		return;
+
+ 	/* Then check host-wide RFC 5961 rate limit. */
+--
+1.9.1
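
For illustration only, below is a minimal user-space C sketch of the rate-limiting pattern that the backport factors into __tcp_oow_rate_limited() above: an event is suppressed if it arrives within a configured interval of the previous one, and the timestamp is refreshed only when the event is allowed through. The millisecond clock, the oow_rate_limited()/now_ms() names, and the 500 ms limit are assumptions made for this example; they are not kernel code and not part of the patch.

/* Standalone sketch, build with: gcc -o oow_demo oow_demo.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Monotonic time in milliseconds, standing in for tcp_time_stamp. */
static uint32_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

/* Mirrors the shape of the kernel helper: returns true when the caller
 * should NOT send a duplicate/challenge ACK yet; otherwise refreshes the
 * last-sent timestamp and returns false.
 */
static bool oow_rate_limited(uint32_t *last_ack_ms, uint32_t ratelimit_ms)
{
	if (*last_ack_ms) {
		int32_t elapsed = (int32_t)(now_ms() - *last_ack_ms);

		if (elapsed >= 0 && (uint32_t)elapsed < ratelimit_ms)
			return true;	/* rate-limited: don't send yet */
	}

	*last_ack_ms = now_ms();
	return false;			/* not rate-limited: send now */
}

int main(void)
{
	uint32_t last = 0;
	int i;

	/* Back-to-back attempts: the first is sent, the rest are suppressed
	 * because they fall inside the 500 ms window.
	 */
	for (i = 0; i < 3; i++)
		printf("attempt %d: %s\n", i,
		       oow_rate_limited(&last, 500) ? "suppressed" : "sent");
	return 0;
}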