From: Michihiro NAKAJIMA
Date: Fri, 2 Nov 2012 06:21:48 +0000 (+0900)
Subject: Cancel HFS+ compression if the compressed size is not small enough.
X-Git-Tag: v3.1.0~39^2~22
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8f7fbd41dd55a2b0ab4808ba3854c1585166c8b9;p=thirdparty%2Flibarchive.git

Cancel HFS+ compression if the compressed size is not small enough.
---

diff --git a/libarchive/archive_write_disk_posix.c b/libarchive/archive_write_disk_posix.c
index a522d9c8a..90c82c51a 100644
--- a/libarchive/archive_write_disk_posix.c
+++ b/libarchive/archive_write_disk_posix.c
@@ -322,6 +322,7 @@ struct archive_write_disk {
 #define DECMPFS_UNCOMPRESSED_SIZE   8
 #define DECMPFS_HEADER_SIZE         16
 
+#define HFS_BLOCKS(s)   ((s) >> 12)
 
 static int  check_symlinks(struct archive_write_disk *);
 static int  create_filesystem_object(struct archive_write_disk *);
@@ -1033,6 +1034,67 @@ hfs_reset_compressor(struct archive_write_disk *a)
     return (ARCHIVE_OK);
 }
 
+static int
+hfs_decompress(struct archive_write_disk *a)
+{
+    uint32_t *block_info;
+    unsigned int block_count;
+    uint32_t data_pos, data_size;
+    ssize_t r;
+    ssize_t bytes_written, bytes_to_write;
+    unsigned char *b;
+
+    block_info = (uint32_t *)(a->resource_fork + RSRC_H_SIZE);
+    block_count = archive_le32dec(block_info++);
+    while (block_count--) {
+        data_pos = RSRC_H_SIZE + archive_le32dec(block_info++);
+        data_size = archive_le32dec(block_info++);
+        r = fgetxattr(a->fd, XATTR_RESOURCEFORK_NAME,
+            a->compressed_buffer, data_size, data_pos, 0);
+        if (r != data_size) {
+            archive_set_error(&a->archive,
+                (r < 0)?errno:ARCHIVE_ERRNO_MISC,
+                "Failed to read resource fork");
+            return (ARCHIVE_WARN);
+        }
+        if (a->compressed_buffer[0] == 0xff) {
+            bytes_to_write = data_size - 1;
+            b = a->compressed_buffer + 1;
+        } else {
+            uLong dest_len = MAX_DECMPFS_BLOCK_SIZE;
+            int zr;
+
+            zr = uncompress((Bytef *)a->uncompressed_buffer,
+                &dest_len, a->compressed_buffer, data_size);
+            if (zr != Z_OK) {
+                archive_set_error(&a->archive,
+                    ARCHIVE_ERRNO_MISC,
+                    "Failed to decompress resource fork");
+                return (ARCHIVE_WARN);
+            }
+            bytes_to_write = dest_len;
+            b = (unsigned char *)a->uncompressed_buffer;
+        }
+        do {
+            bytes_written = write(a->fd, b, bytes_to_write);
+            if (bytes_written < 0) {
+                archive_set_error(&a->archive, errno,
+                    "Write failed");
+                return (ARCHIVE_WARN);
+            }
+            bytes_to_write -= bytes_written;
+            b += bytes_written;
+        } while (bytes_to_write > 0);
+    }
+    r = fremovexattr(a->fd, XATTR_RESOURCEFORK_NAME, 0);
+    if (r == -1) {
+        archive_set_error(&a->archive, errno,
+            "Failed to remove resource fork");
+        return (ARCHIVE_WARN);
+    }
+    return (ARCHIVE_OK);
+}
+
 static int
 hfs_drive_compressor(struct archive_write_disk *a, const char *buff,
     size_t size)
@@ -1155,6 +1217,7 @@ fprintf(stderr, "block %u bytes --> %u bytes in resource fork\n", (unsigned)size
      */
     if (a->file_remaining_bytes == 0) {
         size_t rsrc_size;
+        int64_t bk;
 
         /* Append the resource footer. */
         rsrc_size = hfs_set_resource_fork_footer(
@@ -1162,6 +1225,15 @@ fprintf(stderr, "block %u bytes --> %u bytes in resource fork\n", (unsigned)size
             a->compressed_buffer_remaining);
         ret = hfs_write_compressed_data(a, bytes_used + rsrc_size);
         a->compressed_buffer_remaining = a->compressed_buffer_size;
+
+        /* If the compressed size is not enough smaller than
+         * the uncompressed size, cancel HFS+ compression.
+         * TODO: study the behavior of the ditto utility and improve
+         * the condition for falling back to no HFS+ compression. */
+        bk = HFS_BLOCKS(a->compressed_rsrc_position);
+        bk += bk >> 7;
+        if (bk > HFS_BLOCKS(a->filesize))
+            return hfs_decompress(a);
         /*
          * Write the resourcefork header.
          */
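
Illustrative note (not part of the commit): the fallback condition added in the last hunk works in 4 KiB HFS+ allocation blocks and pads the compressed block count by bk >> 7 (about 1/128, roughly 0.8%), presumably as a margin for resource-fork and extended-attribute bookkeeping, so compression is kept only when it actually saves space after that padding. A minimal standalone sketch of the same check, with hypothetical parameter names standing in for a->compressed_rsrc_position and a->filesize:

    #include <stdint.h>

    /* 4 KiB HFS+ allocation blocks, mirroring the HFS_BLOCKS() macro above. */
    #define HFS_BLOCKS(s)   ((s) >> 12)

    /*
     * Sketch of the cancel condition: returns nonzero when the compressed
     * resource fork, padded by roughly 0.8%, would occupy more allocation
     * blocks than the uncompressed file, i.e. when HFS+ compression should
     * be cancelled and the data written uncompressed (the commit does this
     * via hfs_decompress()).  Parameter names are hypothetical stand-ins,
     * not libarchive API.
     */
    static int
    hfs_compression_not_worth_it(int64_t compressed_rsrc_size,
        int64_t uncompressed_size)
    {
        int64_t bk;

        bk = HFS_BLOCKS(compressed_rsrc_size);
        bk += bk >> 7;  /* allow ~1/128 overhead on the compressed side */
        return (bk > HFS_BLOCKS(uncompressed_size));
    }

With this padding, a file whose compressed resource fork shrinks by less than about 1/128 of its block count fails the test and is written uncompressed instead.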