# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
+ def test_empty_flush(self):
+ # Test that calling .flush() on unused objects works.
+ # (Bug #1083110 -- calling .flush() on decompress objects
+ # caused a core dump.)
+
+ co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
+ self.failUnless(co.flush()) # Returns a complete, empty zlib stream
+ dco = zlib.decompressobj()
+ self.assertEqual(dco.flush(), "") # Returns nothing
+
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
once when a size argument is given. This prevents a buffer overflow in the
tokenizer with very long source lines.
+- Bug #1083110: calling ``flush()`` on a ``zlib`` decompress object immediately
+  after creating it, without any intervening ``decompress()`` calls, would
+  segfault.
+
Build
-----
return(NULL);
self->zst.zalloc = (alloc_func)NULL;
self->zst.zfree = (free_func)Z_NULL;
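+ /* deflateInit2() leaves next_in/avail_in for the caller to set; clear
+    them so an immediate flush() cannot feed garbage to deflate(). */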
+ self->zst.next_in = NULL;
+ self->zst.avail_in = 0;
err = deflateInit2(&self->zst, level, method, wbits, memLevel, strategy);
switch(err) {
case (Z_OK):
return(NULL);
self->zst.zalloc = (alloc_func)NULL;
self->zst.zfree = (free_func)Z_NULL;
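+ /* Same as in compressobj: inflateInit2() does not set next_in/avail_in,
+    and flush() may be the first call into inflate(). */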
+ self->zst.next_in = NULL;
+ self->zst.avail_in = 0;
err = inflateInit2(&self->zst, wbits);
switch(err) {
case (Z_OK):
Py_END_ALLOW_THREADS
}
- /* Not all of the compressed data could be accomodated in the output buffer
+ /* Not all of the compressed data could be accommodated in the output buffer
of specified size. Return the unconsumed tail in an attribute.*/
if(max_length) {
Py_DECREF(self->unconsumed_tail);
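
The max_length path this comment describes is reached from Python through the
optional second argument to decompress(); a minimal sketch of draining the
unconsumed tail (illustrative only):

    import zlib

    compressed = zlib.compress("spam and eggs " * 512)
    dco = zlib.decompressobj()
    # Cap each call at 256 output bytes; input that could not be consumed
    # yet is saved on dco.unconsumed_tail for the next call.
    pieces = [dco.decompress(compressed, 256)]
    while dco.unconsumed_tail:
        pieces.append(dco.decompress(dco.unconsumed_tail, 256))
    result = "".join(pieces) + dco.flush()
    assert result == "spam and eggs " * 512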