Diffstat (limited to 'recipes-core/zlib/files/CVE-2018-25032-fuzz-fixed.patch')
-rw-r--r--  recipes-core/zlib/files/CVE-2018-25032-fuzz-fixed.patch  351
1 file changed, 351 insertions, 0 deletions
diff --git a/recipes-core/zlib/files/CVE-2018-25032-fuzz-fixed.patch b/recipes-core/zlib/files/CVE-2018-25032-fuzz-fixed.patch
new file mode 100644
index 00000000..778b3b9a
--- /dev/null
+++ b/recipes-core/zlib/files/CVE-2018-25032-fuzz-fixed.patch
@@ -0,0 +1,351 @@
From c3970bb352f4f8cdb27d31e78c558bd1eb332f48 Mon Sep 17 00:00:00 2001
From: Mark Adler <madler@alumni.caltech.edu>
Date: Tue, 17 Apr 2018 22:09:22 -0700
Subject: [PATCH] Fix a bug that can crash deflate on some input when using
 Z_FIXED.

This bug was reported by Danilo Ramos of Eideticom, Inc. It has
lain in wait 13 years before being found! The bug was introduced
in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
option forces the use of fixed Huffman codes. For rare inputs with
a large number of distant matches, the pending buffer into which
the compressed data is written can overwrite the distance symbol
table which it overlays. That results in corrupted output due to
invalid distances, and can result in out-of-bound accesses,
crashing the application.

The fix here combines the distance buffer and literal/length
buffers into a single symbol buffer. Now three bytes of pending
buffer space are opened up for each literal or length/distance
pair consumed, instead of the previous two bytes. This assures
that the pending buffer cannot overwrite the symbol table, since
the maximum fixed code compressed length/distance is 31 bits, and
since there are four bytes of pending space for every three bytes
of symbol space.

Upstream-Status: Backport
[https://github.com/madler/zlib/commit/5c44459c3b28a9bd3283aaceab7c615f8020c531]
CVE: CVE-2018-25032
Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
---
 deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++---------------
 deflate.h | 25 +++++++++----------
 trees.c | 50 +++++++++++--------------------------
 3 files changed, 79 insertions(+), 70 deletions(-)

diff --git a/deflate.c b/deflate.c
index b5e7849..2730035 100644
--- a/deflate.c
+++ b/deflate.c
@@ -237,11 +237,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
 int wrap = 1;
 static const char my_version[] = ZLIB_VERSION;

- ushf *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
 if (version == Z_NULL || version[0] != my_version[0] ||
 stream_size != sizeof(z_stream)) {
 return Z_VERSION_ERROR;
@@ -324,9 +319,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,

 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
- s->pending_buf = (uchf *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+ /* We overlay pending_buf and sym_buf. This works since the average size
+ * for length/distance pairs over any compressed block is assured to be 31
+ * bits or less.
+ *
+ * Analysis: The longest fixed codes are a length code of 8 bits plus 5
+ * extra bits, for lengths 131 to 257. The longest fixed distance codes are
+ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
+ * possible fixed-codes length/distance pair is then 31 bits total.
+ *
+ * sym_buf starts one-fourth of the way into pending_buf. So there are
+ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
+ * in sym_buf is three bytes -- two for the distance and one for the
+ * literal/length. As each symbol is consumed, the pointer to the next
+ * sym_buf value to read moves forward three bytes. From that symbol, up to
+ * 31 bits are written to pending_buf. The closest the written pending_buf
+ * bits gets to the next sym_buf symbol to read is just before the last
+ * code is written. At that time, 31*(n-2) bits have been written, just
+ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
+ * symbols are written.) The closest the writing gets to what is unread is
+ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
+ * can range from 128 to 32768.
+ *
+ * Therefore, at a minimum, there are 142 bits of space between what is
+ * written and what is read in the overlain buffers, so the symbols cannot
+ * be overwritten by the compressed data. That space is actually 139 bits,
+ * due to the three-bit fixed-code block header.
+ *
+ * That covers the case where either Z_FIXED is specified, forcing fixed
+ * codes, or when the use of fixed codes is chosen, because that choice
+ * results in a smaller compressed block than dynamic codes. That latter
+ * condition then assures that the above analysis also covers all dynamic
+ * blocks. A dynamic-code block will only be chosen to be emitted if it has
+ * fewer bits than a fixed-code block would for the same set of symbols.
+ * Therefore its average symbol length is assured to be less than 31. So
+ * the compressed data for a dynamic block also cannot overwrite the
+ * symbols from which it is being constructed.
+ */
+
+ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
+ s->pending_buf_size = (ulg)s->lit_bufsize * 4;

 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
 s->pending_buf == Z_NULL) {
@@ -335,8 +368,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
 deflateEnd (strm);
 return Z_MEM_ERROR;
 }
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+ s->sym_buf = s->pending_buf + s->lit_bufsize;
+ s->sym_end = (s->lit_bufsize - 1) * 3;
+ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */

 s->level = level;
 s->strategy = strategy;
@@ -553,7 +590,7 @@ int ZEXPORT deflatePrime (strm, bits, value)

 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
 s = strm->state;
- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
+ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
 return Z_BUF_ERROR;
 do {
 put = Buf_size - s->bi_valid;
@@ -1125,7 +1162,6 @@ int ZEXPORT deflateCopy (dest, source)
 #else
 deflate_state *ds;
 deflate_state *ss;
- ushf *overlay;


 if (deflateStateCheck(source) || dest == Z_NULL) {
@@ -1145,8 +1181,7 @@ int ZEXPORT deflateCopy (dest, source)
 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
- ds->pending_buf = (uchf *) overlay;
+ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);

 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
 ds->pending_buf == Z_NULL) {
@@ -1160,8 +1195,7 @@ int ZEXPORT deflateCopy (dest, source)
 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+ ds->sym_buf = ds->pending_buf + ds->lit_bufsize;

 ds->l_desc.dyn_tree = ds->dyn_ltree;
 ds->d_desc.dyn_tree = ds->dyn_dtree;
@@ -1717,7 +1751,7 @@ local block_state deflate_fast(s, flush)
 FLUSH_BLOCK(s, 1);
 return finish_done;
 }
- if (s->last_lit)
+ if (s->sym_next)
 FLUSH_BLOCK(s, 0);
 return block_done;
 }
@@ -1848,7 +1882,7 @@ local block_state deflate_slow(s, flush)
 FLUSH_BLOCK(s, 1);
 return finish_done;
 }
- if (s->last_lit)
+ if (s->sym_next)
 FLUSH_BLOCK(s, 0);
 return block_done;
 }
@@ -1923,7 +1957,7 @@ local block_state deflate_rle(s, flush)
 FLUSH_BLOCK(s, 1);
 return finish_done;
 }
- if (s->last_lit)
+ if (s->sym_next)
 FLUSH_BLOCK(s, 0);
 return block_done;
 }
@@ -1962,7 +1996,7 @@ local block_state deflate_huff(s, flush)
 FLUSH_BLOCK(s, 1);
 return finish_done;
 }
- if (s->last_lit)
+ if (s->sym_next)
 FLUSH_BLOCK(s, 0);
 return block_done;
 }
diff --git a/deflate.h b/deflate.h
index 7f31b54..8d84f8e 100644
--- a/deflate.h
+++ b/deflate.h
@@ -230,7 +230,7 @@ typedef struct internal_state {
 /* Depth of each subtree used as tie breaker for trees of equal frequency
 */

- uchf *l_buf; /* buffer for literals or lengths */
+ uchf *sym_buf; /* buffer for distances and literals/lengths */

 uInt lit_bufsize;
 /* Size of match buffer for literals/lengths. There are 4 reasons for
@@ -252,13 +252,8 @@ typedef struct internal_state {
 * - I can't count above 4
 */

- uInt last_lit; /* running index in l_buf */
-
- ushf *d_buf;
- /* Buffer for distances. To simplify the code, d_buf and l_buf have
- * the same number of elements. To use different lengths, an extra flag
- * array would be necessary.
- */
+ uInt sym_next; /* running index in sym_buf */
+ uInt sym_end; /* symbol table full when sym_next reaches this */

 ulg opt_len; /* bit length of current block with optimal trees */
 ulg static_len; /* bit length of current block with static trees */
@@ -344,20 +339,22 @@ void ZLIB_INTERNAL bi_windup OF((deflate_state *s));

 # define _tr_tally_lit(s, c, flush) \
 { uch cc = (c); \
- s->d_buf[s->last_lit] = 0; \
- s->l_buf[s->last_lit++] = cc; \
+ s->sym_buf[s->sym_next++] = 0; \
+ s->sym_buf[s->sym_next++] = 0; \
+ s->sym_buf[s->sym_next++] = cc; \
 s->dyn_ltree[cc].Freq++; \
- flush = (s->last_lit == s->lit_bufsize-1); \
+ flush = (s->sym_next == s->sym_end); \
 }
 # define _tr_tally_dist(s, distance, length, flush) \
 { uch len = (uch)(length); \
 ush dist = (ush)(distance); \
- s->d_buf[s->last_lit] = dist; \
- s->l_buf[s->last_lit++] = len; \
+ s->sym_buf[s->sym_next++] = dist; \
+ s->sym_buf[s->sym_next++] = dist >> 8; \
+ s->sym_buf[s->sym_next++] = len; \
 dist--; \
 s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
 s->dyn_dtree[d_code(dist)].Freq++; \
- flush = (s->last_lit == s->lit_bufsize-1); \
+ flush = (s->sym_next == s->sym_end); \
 }
 #else
 # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
diff --git a/trees.c b/trees.c
index 7480efc..2fc7860 100644
--- a/trees.c
+++ b/trees.c
@@ -343,7 +343,7 @@ local void init_block(s)

 s->dyn_ltree[END_BLOCK].Freq = 1;
 s->opt_len = s->static_len = 0L;
- s->last_lit = s->matches = 0;
+ s->sym_next = s->matches = 0;
 }

 #define SMALLEST 1
@@ -875,7 +875,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)

 Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
 opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
- s->last_lit));
+ s->sym_next / 3));

 if (static_lenb <= opt_lenb) opt_lenb = static_lenb;

@@ -944,8 +944,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
 unsigned dist; /* distance of matched string */
 unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
 {
- s->d_buf[s->last_lit] = (ush)dist;
- s->l_buf[s->last_lit++] = (uch)lc;
+ s->sym_buf[s->sym_next++] = dist;
+ s->sym_buf[s->sym_next++] = dist >> 8;
+ s->sym_buf[s->sym_next++] = lc;
 if (dist == 0) {
 /* lc is the unmatched char */
 s->dyn_ltree[lc].Freq++;
@@ -960,30 +961,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
 s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
 s->dyn_dtree[d_code(dist)].Freq++;
 }
-
-#ifdef TRUNCATE_BLOCK
- /* Try to guess if it is profitable to stop the current block here */
- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {
- /* Compute an upper bound for the compressed length */
- ulg out_length = (ulg)s->last_lit*8L;
- ulg in_length = (ulg)((long)s->strstart - s->block_start);
- int dcode;
- for (dcode = 0; dcode < D_CODES; dcode++) {
- out_length += (ulg)s->dyn_dtree[dcode].Freq *
- (5L+extra_dbits[dcode]);
- }
- out_length >>= 3;
- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
- s->last_lit, in_length, out_length,
- 100L - out_length*100L/in_length));
- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
- }
-#endif
- return (s->last_lit == s->lit_bufsize-1);
- /* We avoid equality with lit_bufsize because of wraparound at 64K
- * on 16 bit machines and because stored blocks are restricted to
- * 64K-1 bytes.
- */
+ return (s->sym_next == s->sym_end);
 }

 /* ===========================================================================
@@ -996,13 +974,14 @@ local void compress_block(s, ltree, dtree)
 {
 unsigned dist; /* distance of matched string */
 int lc; /* match length or unmatched char (if dist == 0) */
- unsigned lx = 0; /* running index in l_buf */
+ unsigned sx = 0; /* running index in sym_buf */
 unsigned code; /* the code to send */
 int extra; /* number of extra bits to send */

- if (s->last_lit != 0) do {
- dist = s->d_buf[lx];
- lc = s->l_buf[lx++];
+ if (s->sym_next != 0) do {
+ dist = s->sym_buf[sx++] & 0xff;
+ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8;
+ lc = s->sym_buf[sx++];
 if (dist == 0) {
 send_code(s, lc, ltree); /* send a literal byte */
 Tracecv(isgraph(lc), (stderr," '%c' ", lc));
@@ -1027,11 +1006,10 @@ local void compress_block(s, ltree, dtree)
 }
 } /* literal or match pair ? */

- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
- "pendingBuf overflow");
+ /* Check that the overlay between pending_buf and sym_buf is ok: */
+ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");

- } while (lx < s->last_lit);
+ } while (sx < s->sym_next);

 send_code(s, END_BLOCK, ltree);
 }
--
2.35.1

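Note for anyone validating this backport: the code path changed above is only reached through deflate's symbol tally, and the overflow fixed here required Z_FIXED (or a fixed-code block winning over dynamic codes) plus rare inputs with many distant matches. The sketch below is a minimal smoke test using only the public zlib API (deflateInit2, deflate, deflateEnd) to exercise the Z_FIXED strategy after the patched zlib is installed. The file name, buffer sizes, and the period-199 input pattern are illustrative assumptions, not taken from the patch, and this input is not claimed to reproduce the original crash.

    /* z_fixed_smoke.c -- minimal sketch exercising deflate with Z_FIXED.
     * Not a reproducer for CVE-2018-25032; the original overflow needed
     * rare inputs with many distant matches.  Build: cc z_fixed_smoke.c -lz
     */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        static unsigned char in[256 * 1024];   /* arbitrary input size */
        static unsigned char out[320 * 1024];  /* large enough for worst case */
        z_stream strm;
        size_t i;
        int ret;

        /* Period-199 pattern: deflate will emit mostly length/distance pairs. */
        for (i = 0; i < sizeof(in); i++)
            in[i] = (unsigned char)(i % 199);

        memset(&strm, 0, sizeof(strm));
        /* windowBits 15 and memLevel 8 are the defaults; Z_FIXED forces fixed
         * Huffman codes, the strategy involved in this CVE. */
        ret = deflateInit2(&strm, Z_BEST_COMPRESSION, Z_DEFLATED, 15, 8, Z_FIXED);
        if (ret != Z_OK)
            return 1;

        strm.next_in = in;
        strm.avail_in = sizeof(in);
        strm.next_out = out;
        strm.avail_out = sizeof(out);
        ret = deflate(&strm, Z_FINISH);
        printf("deflate: %d, %lu bytes out\n", ret, (unsigned long)strm.total_out);

        deflateEnd(&strm);
        return ret == Z_STREAM_END ? 0 : 1;
    }

A clean Z_STREAM_END here only shows the Z_FIXED path runs; exhaustive coverage of the overwritten-overlay case still relies on fuzzing, as the patch file name suggests.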