Diffstat (limited to 'meta/recipes-devtools/perl/perl-5.14.3/0001-Prevent-premature-hsplit-calls-and-only-trigger-REHA.patch')
-rw-r--r-- | meta/recipes-devtools/perl/perl-5.14.3/0001-Prevent-premature-hsplit-calls-and-only-trigger-REHA.patch | 178
1 file changed, 0 insertions, 178 deletions
diff --git a/meta/recipes-devtools/perl/perl-5.14.3/0001-Prevent-premature-hsplit-calls-and-only-trigger-REHA.patch b/meta/recipes-devtools/perl/perl-5.14.3/0001-Prevent-premature-hsplit-calls-and-only-trigger-REHA.patch
deleted file mode 100644
index 4357c2ef58..0000000000
--- a/meta/recipes-devtools/perl/perl-5.14.3/0001-Prevent-premature-hsplit-calls-and-only-trigger-REHA.patch
+++ /dev/null
@@ -1,178 +0,0 @@
From d59e31fc729d8a39a774f03bc6bc457029a7aef2 Mon Sep 17 00:00:00 2001
From: Yves Orton <demerphq@gmail.com>
Date: Tue, 12 Feb 2013 10:53:05 +0100
Subject: [PATCH] Prevent premature hsplit() calls, and only trigger REHASH
 after hsplit()

Triggering a hsplit due to long chain length allows an attacker
to create a carefully chosen set of keys which can cause the hash
to use 2 * (2**32) * sizeof(void *) bytes ram. AKA a DOS via memory
exhaustion. Doing so also takes non trivial time.

Eliminating this check, and only inspecting chain length after a
normal hsplit() (triggered when keys>buckets) prevents the attack
entirely, and makes such attacks relatively benign.

(cherry picked from commit f1220d61455253b170e81427c9d0357831ca0fac)

Upstream-Status: Backport

Signed-off-by: Saul Wold <sgw@linux.intel.com>


---
 ext/Hash-Util-FieldHash/t/10_hash.t | 18 ++++++++++++++++--
 hv.c                                | 35 ++++++++---------------------
 t/op/hash.t                         | 20 +++++++++++++++++---
 3 files changed, 41 insertions(+), 32 deletions(-)

diff --git a/ext/Hash-Util-FieldHash/t/10_hash.t b/ext/Hash-Util-FieldHash/t/10_hash.t
index 2cfb4e8..d58f053 100644
--- a/ext/Hash-Util-FieldHash/t/10_hash.t
+++ b/ext/Hash-Util-FieldHash/t/10_hash.t
@@ -38,15 +38,29 @@ use constant START => "a";
 
 # some initial hash data
 fieldhash my %h2;
-%h2 = map {$_ => 1} 'a'..'cc';
+my $counter= "a";
+$h2{$counter++}++ while $counter ne 'cd';
 
 ok (!Internals::HvREHASH(%h2),
     "starting with pre-populated non-pathological hash (rehash flag if off)");
 
 my @keys = get_keys(\%h2);
+my $buckets= buckets(\%h2);
 $h2{$_}++ for @keys;
+$h2{$counter++}++ while buckets(\%h2) == $buckets; # force a split
 ok (Internals::HvREHASH(%h2),
-    scalar(@keys) . " colliding into the same bucket keys are triggering rehash");
+    scalar(@keys) . " colliding into the same bucket keys are triggering rehash after split");
+
+# returns the number of buckets in a hash
+sub buckets {
+    my $hr = shift;
+    my $keys_buckets= scalar(%$hr);
+    if ($keys_buckets=~m!/([0-9]+)\z!) {
+        return 0+$1;
+    } else {
+        return 8;
+    }
+}
 
 sub get_keys {
     my $hr = shift;
diff --git a/hv.c b/hv.c
index 2be1feb..abb9d76 100644
--- a/hv.c
+++ b/hv.c
@@ -35,7 +35,8 @@ holds the key and hash value.
 #define PERL_HASH_INTERNAL_ACCESS
 #include "perl.h"
 
-#define HV_MAX_LENGTH_BEFORE_SPLIT 14
+#define HV_MAX_LENGTH_BEFORE_REHASH 14
+#define SHOULD_DO_HSPLIT(xhv) ((xhv)->xhv_keys > (xhv)->xhv_max) /* HvTOTALKEYS(hv) > HvMAX(hv) */
 
 static const char S_strtab_error[]
     = "Cannot modify shared string table in hv_%s";
@@ -794,29 +795,9 @@ Perl_hv_common(pTHX_ HV *hv, SV *keysv, const char *key, STRLEN klen,
     if (masked_flags & HVhek_ENABLEHVKFLAGS)
         HvHASKFLAGS_on(hv);
 
-    {
-        const HE *counter = HeNEXT(entry);
-
-        xhv->xhv_keys++; /* HvTOTALKEYS(hv)++ */
-        if (!counter) {                /* initial entry? */
-        } else if (xhv->xhv_keys > xhv->xhv_max) {
-            /* Use only the old HvKEYS(hv) > HvMAX(hv) condition to limit
-               bucket splits on a rehashed hash, as we're not going to
-               split it again, and if someone is lucky (evil) enough to
-               get all the keys in one list they could exhaust our memory
-               as we repeatedly double the number of buckets on every
-               entry. Linear search feels a less worse thing to do. */
-            hsplit(hv);
-        } else if(!HvREHASH(hv)) {
-            U32 n_links = 1;
-
-            while ((counter = HeNEXT(counter)))
-                n_links++;
-
-            if (n_links > HV_MAX_LENGTH_BEFORE_SPLIT) {
-                hsplit(hv);
-            }
-        }
+    xhv->xhv_keys++; /* HvTOTALKEYS(hv)++ */
+    if ( SHOULD_DO_HSPLIT(xhv) ) {
+        hsplit(hv);
     }
 
     if (return_svp) {
@@ -1192,7 +1173,7 @@ S_hsplit(pTHX_ HV *hv)
 
 
     /* Pick your policy for "hashing isn't working" here: */
-    if (longest_chain <= HV_MAX_LENGTH_BEFORE_SPLIT /* split worked? */
+    if (longest_chain <= HV_MAX_LENGTH_BEFORE_REHASH /* split worked? */
         || HvREHASH(hv)) {
         return;
     }
@@ -2831,8 +2812,8 @@ S_share_hek_flags(pTHX_ const char *str, I32 len, register U32 hash, int flags)
 
     xhv->xhv_keys++; /* HvTOTALKEYS(hv)++ */
     if (!next) {                       /* initial entry? */
-    } else if (xhv->xhv_keys > xhv->xhv_max /* HvKEYS(hv) > HvMAX(hv) */) {
-        hsplit(PL_strtab);
+    } else if ( SHOULD_DO_HSPLIT(xhv) ) {
+        hsplit(PL_strtab);
     }
 }
 
diff --git a/t/op/hash.t b/t/op/hash.t
index 278bea7..201260a 100644
--- a/t/op/hash.t
+++ b/t/op/hash.t
@@ -39,22 +39,36 @@ use constant THRESHOLD => 14;
 use constant START => "a";
 
 # some initial hash data
-my %h2 = map {$_ => 1} 'a'..'cc';
+my %h2;
+my $counter= "a";
+$h2{$counter++}++ while $counter ne 'cd';
 
 ok (!Internals::HvREHASH(%h2),
     "starting with pre-populated non-pathological hash (rehash flag if off)");
 
 my @keys = get_keys(\%h2);
+my $buckets= buckets(\%h2);
 $h2{$_}++ for @keys;
+$h2{$counter++}++ while buckets(\%h2) == $buckets; # force a split
 ok (Internals::HvREHASH(%h2),
-    scalar(@keys) . " colliding into the same bucket keys are triggering rehash");
+    scalar(@keys) . " colliding into the same bucket keys are triggering rehash after split");
+
+# returns the number of buckets in a hash
+sub buckets {
+    my $hr = shift;
+    my $keys_buckets= scalar(%$hr);
+    if ($keys_buckets=~m!/([0-9]+)\z!) {
+        return 0+$1;
+    } else {
+        return 8;
+    }
+}
 
 sub get_keys {
     my $hr = shift;
 
 # the minimum of bits required to mount the attack on a hash
 my $min_bits = log(THRESHOLD)/log(2);
-
 # if the hash has already been populated with a significant amount
 # of entries the number of mask bits can be higher
 my $keys = scalar keys %$hr;
-- 
1.8.3.1

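Note: the buckets() helper added by this (now removed) patch relies on the pre-5.26 behaviour of scalar(%hash), which can report hash internals as a "used/total" buckets string. A minimal standalone sketch of that technique follows; it assumes a Perl of that era (5.14-ish), and on newer perls the regex never matches, so it falls back to the default of 8 buckets, exactly as the patched tests do.

#!/usr/bin/perl
use strict;
use warnings;

# Count the allocated buckets of a hash the same way the patched tests do.
# Assumes perl < 5.26, where scalar(%h) on a used hash returns e.g. "21/32"
# (used buckets / allocated buckets); otherwise fall back to 8, the default
# initial bucket count.
sub buckets {
    my $hr = shift;
    my $keys_buckets = scalar(%$hr);
    return $keys_buckets =~ m!/([0-9]+)\z! ? 0 + $1 : 8;
}

my %h;
$h{$_}++ for 'aa' .. 'bz';    # 52 keys, enough to force at least one split
printf "keys=%d buckets=%d\n", scalar(keys %h), buckets(\%h);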