Initial commit

This commit is contained in:
domenico
2025-06-24 13:14:22 +02:00
commit 4002f145fc
9002 changed files with 1731834 additions and 0 deletions

View File

@@ -0,0 +1,253 @@
From d2f06cd65d7ac39c6dd6761eef162abc946b155b Mon Sep 17 00:00:00 2001
From: Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
Date: Tue, 11 Apr 2017 17:13:02 -0700
Subject: [PATCH] NEON implementation for Adler32
The checksum is calculated on the uncompressed PNG data
and the computation can be made much faster by using SIMD.
Tests on ARMv8 yielded an improvement of about 3x
(e.g. walltime was 350ms vs. 125ms for a 4096x4096-byte
buffer processed 30 times). That results in at least an 18% improvement
in image decoding in Chromium.
Further details at:
https://bugs.chromium.org/p/chromium/issues/detail?id=688601
---
CMakeLists.txt | 29 +++++++---
adler32.c | 5 ++
contrib/README.contrib | 3 +
contrib/arm/neon_adler32.c | 137 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 166 insertions(+), 8 deletions(-)
create mode 100644 contrib/arm/neon_adler32.c
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0fe939df..8e75f664 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,6 +7,7 @@ set(VERSION "1.2.11")
option(ASM686 "Enable building i686 assembly implementation")
option(AMD64 "Enable building amd64 assembly implementation")
+option(ARMv8 "Enable building ARM NEON intrinsics implementation")
set(INSTALL_BIN_DIR "${CMAKE_INSTALL_PREFIX}/bin" CACHE PATH "Installation directory for executables")
set(INSTALL_LIB_DIR "${CMAKE_INSTALL_PREFIX}/lib" CACHE PATH "Installation directory for libraries")
@@ -132,14 +133,26 @@ endif()
if(CMAKE_COMPILER_IS_GNUCC)
if(ASM686)
set(ZLIB_ASMS contrib/asm686/match.S)
- elseif (AMD64)
+ elseif(AMD64)
set(ZLIB_ASMS contrib/amd64/amd64-match.S)
- endif ()
+ elseif(ARMv8)
+ set(ZLIB_ARMv8 contrib/arm/neon_adler32.c)
+ endif()
- if(ZLIB_ASMS)
- add_definitions(-DASMV)
- set_source_files_properties(${ZLIB_ASMS} PROPERTIES LANGUAGE C COMPILE_FLAGS -DNO_UNDERLINE)
- endif()
+ if(ZLIB_ASMS)
+ add_definitions(-DASMV)
+ set_source_files_properties(${ZLIB_ASMS} PROPERTIES LANGUAGE C COMPILE_FLAGS -DNO_UNDERLINE)
+ elseif(ZLIB_ARMv8)
+ add_definitions(-DARMv8)
+ set(COMPILER ${CMAKE_C_COMPILER})
+ # NEON is mandatory in ARMv8.
+ if(${COMPILER} MATCHES "aarch64")
+ set_source_files_properties(${ZLIB_ARMv8} PROPERTIES LANGUAGE C COMPILE_FLAGS -march=armv8-a)
+ # But it was optional for ARMv7.
+ elseif(${COMPILER} MATCHES "arm")
+ set_source_files_properties(${ZLIB_ARMv8} PROPERTIES LANGUAGE C COMPILE_FLAGS -mfpu=neon)
+ endif()
+ endif()
endif()
if(MSVC)
@@ -183,8 +196,8 @@ if(MINGW)
set(ZLIB_DLL_SRCS ${CMAKE_CURRENT_BINARY_DIR}/zlib1rc.obj)
endif(MINGW)
-add_library(zlib SHARED ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_DLL_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
-add_library(zlibstatic STATIC ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
+add_library(zlib SHARED ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_ARMv8} ${ZLIB_DLL_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
+add_library(zlibstatic STATIC ${ZLIB_SRCS} ${ZLIB_ASMS} ${ZLIB_ARMv8} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
set_target_properties(zlib PROPERTIES DEFINE_SYMBOL ZLIB_DLL)
set_target_properties(zlib PROPERTIES SOVERSION 1)
diff --git a/adler32.c b/adler32.c
index d0be4380..45ebaa4b 100644
--- a/adler32.c
+++ b/adler32.c
@@ -136,7 +136,12 @@ uLong ZEXPORT adler32(adler, buf, len)
const Bytef *buf;
uInt len;
{
+#ifdef ARMv8
+# pragma message("Using NEON-ized Adler32.")
+ return NEON_adler32(adler, buf, len);
+#else
return adler32_z(adler, buf, len);
+#endif
}
/* ========================================================================= */
diff --git a/contrib/README.contrib b/contrib/README.contrib
index a411d5c3..3fd1d202 100644
--- a/contrib/README.contrib
+++ b/contrib/README.contrib
@@ -12,6 +12,9 @@ amd64/ by Mikhail Teterin <mi@ALDAN.algebra.com>
asm code for AMD64
See patch at http://www.freebsd.org/cgi/query-pr.cgi?pr=bin/96393
+arm/ by Adenilson Cavalcanti <cavalcantii@chromium.org>
+ ARM optimizations (NEON and ARMv8 code).
+
asm686/ by Brian Raiter <breadbox@muppetlabs.com>
asm code for Pentium and PPro/PII, using the AT&T (GNU as) syntax
See http://www.muppetlabs.com/~breadbox/software/assembly.html
diff --git a/contrib/arm/neon_adler32.c b/contrib/arm/neon_adler32.c
new file mode 100644
index 00000000..f173a74f
--- /dev/null
+++ b/contrib/arm/neon_adler32.c
@@ -0,0 +1,137 @@
+/* Copyright (C) 1995-2011, 2016 Mark Adler
+ * Copyright (C) 2017 ARM Holdings Inc.
+ * Authors: Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
+ * Simon Hosie <simon.hosie@arm.com>
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+#if (defined(__ARM_NEON__) || defined(__ARM_NEON))
+#include <arm_neon.h>
+
+static void NEON_accum32(uint32_t *s, const unsigned char *buf,
+ unsigned int len)
+{
+ static const uint8_t taps[32] = {
+ 32, 31, 30, 29, 28, 27, 26, 25,
+ 24, 23, 22, 21, 20, 19, 18, 17,
+ 16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1 };
+
+ uint32x2_t adacc2, s2acc2, as;
+ uint8x16_t t0 = vld1q_u8(taps), t1 = vld1q_u8(taps + 16);
+
+ uint32x4_t adacc = vdupq_n_u32(0), s2acc = vdupq_n_u32(0);
+ adacc = vsetq_lane_u32(s[0], adacc, 0);
+ s2acc = vsetq_lane_u32(s[1], s2acc, 0);
+
+ while (len >= 2) {
+ uint8x16_t d0 = vld1q_u8(buf), d1 = vld1q_u8(buf + 16);
+ uint16x8_t adler, sum2;
+ s2acc = vaddq_u32(s2acc, vshlq_n_u32(adacc, 5));
+ adler = vpaddlq_u8( d0);
+ adler = vpadalq_u8(adler, d1);
+ sum2 = vmull_u8( vget_low_u8(t0), vget_low_u8(d0));
+ sum2 = vmlal_u8(sum2, vget_high_u8(t0), vget_high_u8(d0));
+ sum2 = vmlal_u8(sum2, vget_low_u8(t1), vget_low_u8(d1));
+ sum2 = vmlal_u8(sum2, vget_high_u8(t1), vget_high_u8(d1));
+ adacc = vpadalq_u16(adacc, adler);
+ s2acc = vpadalq_u16(s2acc, sum2);
+ len -= 2;
+ buf += 32;
+ }
+
+ while (len > 0) {
+ uint8x16_t d0 = vld1q_u8(buf);
+ uint16x8_t adler, sum2;
+ s2acc = vaddq_u32(s2acc, vshlq_n_u32(adacc, 4));
+ adler = vpaddlq_u8(d0);
+ sum2 = vmull_u8( vget_low_u8(t1), vget_low_u8(d0));
+ sum2 = vmlal_u8(sum2, vget_high_u8(t1), vget_high_u8(d0));
+ adacc = vpadalq_u16(adacc, adler);
+ s2acc = vpadalq_u16(s2acc, sum2);
+ buf += 16;
+ len--;
+ }
+
+ adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
+ s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
+ as = vpadd_u32(adacc2, s2acc2);
+ s[0] = vget_lane_u32(as, 0);
+ s[1] = vget_lane_u32(as, 1);
+}
+
+static void NEON_handle_tail(uint32_t *pair, const unsigned char *buf,
+ unsigned int len)
+{
+ /* Oldie K&R code integration. */
+ unsigned int i;
+ for (i = 0; i < len; ++i) {
+ pair[0] += buf[i];
+ pair[1] += pair[0];
+ }
+}
+
+extern unsigned long NEON_adler32(unsigned long adler, const unsigned char *buf,
+ const unsigned int len)
+{
+ /* initial Adler-32 value (deferred check for len == 1 speed) */
+ if (!buf)
+ return 1L;
+
+ /* The largest prime smaller than 65536. */
+ const uint32_t M_BASE = 65521;
+ /* This is the threshold where doing accumulation may overflow. */
+ const int M_NMAX = 5552;
+
+ unsigned long sum2;
+ uint32_t pair[2];
+ int n = M_NMAX;
+ unsigned int done = 0;
+ /* Oldie K&R code integration. */
+ unsigned int i;
+
+ /* Split Adler-32 into component sums, it can be supplied by
+ * the caller sites (e.g. in a PNG file).
+ */
+ sum2 = (adler >> 16) & 0xffff;
+ adler &= 0xffff;
+ pair[0] = adler;
+ pair[1] = sum2;
+
+ for (i = 0; i < len; i += n) {
+ if ((i + n) > len)
+ n = len - i;
+
+ if (n < 16)
+ break;
+
+ NEON_accum32(pair, buf + i, n / 16);
+ pair[0] %= M_BASE;
+ pair[1] %= M_BASE;
+
+ done += (n / 16) * 16;
+ }
+
+ /* Handle the tail elements. */
+ if (done < len) {
+ NEON_handle_tail(pair, (buf + done), len - done);
+ pair[0] %= M_BASE;
+ pair[1] %= M_BASE;
+ }
+
+ /* D = B * 65536 + A, see: https://en.wikipedia.org/wiki/Adler-32. */
+ return (pair[1] << 16) | pair[0];
+}
+#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,501 @@
From 247147654fe5cd11cf15d8dff91440405ea57040 Mon Sep 17 00:00:00 2001
From: Simon Hosie <simon.hosie@arm.com>
Date: Wed, 12 Apr 2017 15:44:21 -0700
Subject: [PATCH 2/2] Inflate using wider loads and stores
In inflate_fast() the output pointer always has plenty of room to write. This
means that so long as the target is capable, wide unaligned loads and stores
can be used to transfer several bytes at once. When the reference distance is
too short, simply unroll the data a little to increase the distance.
Change-Id: I59854eb25d2b1e43561c8a2afaf9175bf10cf674
---
contrib/arm/chunkcopy.h | 279 ++++++++++++++++++++++++++++++++++++++++++++++++
contrib/arm/inffast.c | 96 +++++++----------
contrib/arm/inflate.c | 22 ++--
3 files changed, 335 insertions(+), 62 deletions(-)
create mode 100644 contrib/arm/chunkcopy.h
diff --git a/contrib/arm/chunkcopy.h b/contrib/arm/chunkcopy.h
new file mode 100644
index 00000000..2d6fd6f9
--- /dev/null
+++ b/contrib/arm/chunkcopy.h
@@ -0,0 +1,279 @@
+/* chunkcopy.h -- fast copies and sets
+ * Copyright (C) 2017 ARM, Inc.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifndef CHUNKCOPY_H
+#define CHUNKCOPY_H
+
+#include "zutil.h"
+#include <arm_neon.h>
+
+#if __STDC_VERSION__ >= 199901L
+#define Z_RESTRICT restrict
+#else
+#define Z_RESTRICT
+#endif
+
+typedef uint8x16_t chunkcopy_chunk_t;
+#define CHUNKCOPY_CHUNK_SIZE sizeof(chunkcopy_chunk_t)
+
+/*
+ Ask the compiler to perform a wide, unaligned load with a machine
+ instruction appropriate for the chunkcopy_chunk_t type.
+ */
+static inline chunkcopy_chunk_t loadchunk(const unsigned char FAR *s) {
+ chunkcopy_chunk_t c;
+ __builtin_memcpy(&c, s, sizeof(c));
+ return c;
+}
+
+/*
+ Ask the compiler to perform a wide, unaligned store with a machine
+ instruction appropriate for the chunkcopy_chunk_t type.
+ */
+static inline void storechunk(unsigned char FAR *d, chunkcopy_chunk_t c) {
+ __builtin_memcpy(d, &c, sizeof(c));
+}
+
+/*
+ Perform a memcpy-like operation, but assume that length is non-zero and that
+ it's OK to overwrite at least CHUNKCOPY_CHUNK_SIZE bytes of output even if
+ the length is shorter than this.
+
+ It also guarantees that it will properly unroll the data if the distance
+ between `out` and `from` is at least CHUNKCOPY_CHUNK_SIZE, which we rely on
+ in chunkcopy_relaxed().
+
+ Aside from better memory bus utilisation, this means that short copies
+ (CHUNKCOPY_CHUNK_SIZE bytes or fewer) will fall straight through the loop
+ without iteration, which will hopefully make the branch prediction more
+ reliable.
+ */
+static inline unsigned char FAR *chunkcopy_core(unsigned char FAR *out,
+ const unsigned char FAR *from,
+ unsigned len) {
+ int bump = (--len % CHUNKCOPY_CHUNK_SIZE) + 1;
+ storechunk(out, loadchunk(from));
+ out += bump;
+ from += bump;
+ len /= CHUNKCOPY_CHUNK_SIZE;
+ while (len-- > 0) {
+ storechunk(out, loadchunk(from));
+ out += CHUNKCOPY_CHUNK_SIZE;
+ from += CHUNKCOPY_CHUNK_SIZE;
+ }
+ return out;
+}
+
+/*
+ Like chunkcopy_core, but avoid writing beyond the end of legal output.
+
+ Accepts an additional pointer to the end of safe output. A generic safe
+ copy would use (out + len), but it's normally the case that the end of the
+ output buffer is beyond the end of the current copy, and this can still be
+ exploited.
+ */
+static inline unsigned char FAR *chunkcopy_core_safe(unsigned char FAR *out,
+ const unsigned char FAR * from,
+ unsigned len,
+ unsigned char FAR *limit) {
+ Assert(out + len <= limit, "chunk copy exceeds safety limit");
+ if (limit - out < CHUNKCOPY_CHUNK_SIZE) {
+ const unsigned char FAR * Z_RESTRICT rfrom = from;
+ if (len & 8) { __builtin_memcpy(out, rfrom, 8); out += 8; rfrom += 8; }
+ if (len & 4) { __builtin_memcpy(out, rfrom, 4); out += 4; rfrom += 4; }
+ if (len & 2) { __builtin_memcpy(out, rfrom, 2); out += 2; rfrom += 2; }
+ if (len & 1) { *out++ = *rfrom++; }
+ return out;
+ }
+ return chunkcopy_core(out, from, len);
+}
+
+/*
+ Perform short copies until distance can be rewritten as being at least
+ CHUNKCOPY_CHUNK_SIZE.
+
+ This assumes that it's OK to overwrite at least the first
+ 2*CHUNKCOPY_CHUNK_SIZE bytes of output even if the copy is shorter than
+ this. This assumption holds within inflate_fast() which starts every
+ iteration with at least 258 bytes of output space available (258 being the
+ maximum length output from a single token; see inffast.c).
+ */
+static inline unsigned char FAR *chunkunroll_relaxed(unsigned char FAR *out,
+ unsigned FAR *dist,
+ unsigned FAR *len) {
+ const unsigned char FAR *from = out - *dist;
+ while (*dist < *len && *dist < CHUNKCOPY_CHUNK_SIZE) {
+ storechunk(out, loadchunk(from));
+ out += *dist;
+ *len -= *dist;
+ *dist += *dist;
+ }
+ return out;
+}
+
+
+static inline uint8x16_t chunkset_vld1q_dup_u8x8(const unsigned char FAR * Z_RESTRICT from) {
+#if defined(__clang__) || defined(__aarch64__)
+ return vreinterpretq_u8_u64(vld1q_dup_u64((void *)from));
+#else
+ /* 32-bit GCC uses an alignment hint for vld1q_dup_u64, even when given a
+ * void pointer, so here's an alternate implementation.
+ */
+ uint8x8_t h = vld1_u8(from);
+ return vcombine_u8(h, h);
+#endif
+}
+
+/*
+ Perform an overlapping copy which behaves as a memset() operation, but
+ supporting periods other than one, and assume that length is non-zero and
+ that it's OK to overwrite at least CHUNKCOPY_CHUNK_SIZE*3 bytes of output
+ even if the length is shorter than this.
+ */
+static inline unsigned char FAR *chunkset_core(unsigned char FAR *out,
+ unsigned period,
+ unsigned len) {
+ uint8x16_t f;
+ int bump = ((len - 1) % sizeof(f)) + 1;
+
+ switch (period) {
+ case 1:
+ f = vld1q_dup_u8(out - 1);
+ vst1q_u8(out, f);
+ out += bump;
+ len -= bump;
+ while (len > 0) {
+ vst1q_u8(out, f);
+ out += sizeof(f);
+ len -= sizeof(f);
+ }
+ return out;
+ case 2:
+ f = vreinterpretq_u8_u16(vld1q_dup_u16((void *)(out - 2)));
+ vst1q_u8(out, f);
+ out += bump;
+ len -= bump;
+ if (len > 0) {
+ f = vreinterpretq_u8_u16(vld1q_dup_u16((void *)(out - 2)));
+ do {
+ vst1q_u8(out, f);
+ out += sizeof(f);
+ len -= sizeof(f);
+ } while (len > 0);
+ }
+ return out;
+ case 4:
+ f = vreinterpretq_u8_u32(vld1q_dup_u32((void *)(out - 4)));
+ vst1q_u8(out, f);
+ out += bump;
+ len -= bump;
+ if (len > 0) {
+ f = vreinterpretq_u8_u32(vld1q_dup_u32((void *)(out - 4)));
+ do {
+ vst1q_u8(out, f);
+ out += sizeof(f);
+ len -= sizeof(f);
+ } while (len > 0);
+ }
+ return out;
+ case 8:
+ f = chunkset_vld1q_dup_u8x8(out - 8);
+ vst1q_u8(out, f);
+ out += bump;
+ len -= bump;
+ if (len > 0) {
+ f = chunkset_vld1q_dup_u8x8(out - 8);
+ do {
+ vst1q_u8(out, f);
+ out += sizeof(f);
+ len -= sizeof(f);
+ } while (len > 0);
+ }
+ return out;
+ }
+ out = chunkunroll_relaxed(out, &period, &len);
+ return chunkcopy_core(out, out - period, len);
+}
+
+/*
+ Perform a memcpy-like operation, but assume that length is non-zero and that
+ it's OK to overwrite at least CHUNKCOPY_CHUNK_SIZE bytes of output even if
+ the length is shorter than this.
+
+ Unlike chunkcopy_core() above, no guarantee is made regarding the behaviour
+ of overlapping buffers, regardless of the distance between the pointers.
+ This is reflected in the `restrict`-qualified pointers, allowing the
+ compiler to reorder loads and stores.
+ */
+static inline unsigned char FAR *chunkcopy_relaxed(unsigned char FAR * Z_RESTRICT out,
+ const unsigned char FAR * Z_RESTRICT from,
+ unsigned len) {
+ return chunkcopy_core(out, from, len);
+}
+
+/*
+ Like chunkcopy_relaxed, but avoid writing beyond the end of legal output.
+
+ Unlike chunkcopy_core_safe() above, no guarantee is made regarding the
+ behaviour of overlapping buffers, regardless of the distance between the
+ pointers. This is reflected in the `restrict`-qualified pointers, allowing
+ the compiler to reorder loads and stores.
+
+ Accepts an additional pointer to the end of safe output. A generic safe
+ copy would use (out + len), but it's normally the case that the end of the
+ output buffer is beyond the end of the current copy, and this can still be
+ exploited.
+ */
+static inline unsigned char FAR *chunkcopy_safe(unsigned char FAR *out,
+ const unsigned char FAR * Z_RESTRICT from,
+ unsigned len,
+ unsigned char FAR *limit) {
+ Assert(out + len <= limit, "chunk copy exceeds safety limit");
+ return chunkcopy_core_safe(out, from, len, limit);
+}
+
+/*
+ Perform chunky copy within the same buffer, where the source and destination
+ may potentially overlap.
+
+ Assumes that len > 0 on entry, and that it's safe to write at least
+ CHUNKCOPY_CHUNK_SIZE*3 bytes to the output.
+ */
+static inline unsigned char FAR *chunkcopy_lapped_relaxed(unsigned char FAR *out,
+ unsigned dist,
+ unsigned len) {
+ if (dist < len && dist < CHUNKCOPY_CHUNK_SIZE) {
+ return chunkset_core(out, dist, len);
+ }
+ return chunkcopy_core(out, out - dist, len);
+}
+
+/*
+ Behave like chunkcopy_lapped_relaxed, but avoid writing beyond the end of legal output.
+
+ Accepts an additional pointer to the end of safe output. A generic safe
+ copy would use (out + len), but it's normally the case that the end of the
+ output buffer is beyond the end of the current copy, and this can still be
+ exploited.
+ */
+static inline unsigned char FAR *chunkcopy_lapped_safe(unsigned char FAR *out,
+ unsigned dist,
+ unsigned len,
+ unsigned char FAR *limit) {
+ Assert(out + len <= limit, "chunk copy exceeds safety limit");
+ if (limit - out < CHUNKCOPY_CHUNK_SIZE * 3) {
+ /* TODO: try harder to optimise this */
+ while (len-- > 0) {
+ *out = *(out - dist);
+ out++;
+ }
+ return out;
+ }
+ return chunkcopy_lapped_relaxed(out, dist, len);
+}
+
+#undef Z_RESTRICT
+
+#endif /* CHUNKCOPY_H */
diff --git a/contrib/arm/inffast.c b/contrib/arm/inffast.c
index 0dbd1dbc..f7f50071 100644
--- a/contrib/arm/inffast.c
+++ b/contrib/arm/inffast.c
@@ -7,6 +7,7 @@
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
+#include "chunkcopy.h"
#ifdef ASMINF
# pragma message("Assembler code may have bugs -- use at your own risk")
@@ -57,6 +58,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
unsigned char FAR *out; /* local strm->next_out */
unsigned char FAR *beg; /* inflate()'s initial strm->next_out */
unsigned char FAR *end; /* while out < end, enough space available */
+ unsigned char FAR *limit; /* safety limit for chunky copies */
#ifdef INFLATE_STRICT
unsigned dmax; /* maximum distance from zlib header */
#endif
@@ -84,12 +86,13 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
+ limit = out + strm->avail_out;
#ifdef INFLATE_STRICT
dmax = state->dmax;
#endif
wsize = state->wsize;
whave = state->whave;
- wnext = state->wnext;
+ wnext = (state->wnext == 0 && whave >= wsize) ? wsize : state->wnext;
window = state->window;
hold = state->hold;
bits = state->bits;
@@ -197,70 +200,51 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
#endif
}
from = window;
- if (wnext == 0) { /* very common case */
- from += wsize - op;
- if (op < len) { /* some from window */
- len -= op;
- do {
- *out++ = *from++;
- } while (--op);
- from = out - dist; /* rest from output */
- }
+ if (wnext >= op) { /* contiguous in window */
+ from += wnext - op;
}
- else if (wnext < op) { /* wrap around window */
- from += wsize + wnext - op;
+ else { /* wrap around window */
op -= wnext;
+ from += wsize - op;
if (op < len) { /* some from end of window */
len -= op;
- do {
- *out++ = *from++;
- } while (--op);
- from = window;
- if (wnext < len) { /* some from start of window */
- op = wnext;
- len -= op;
- do {
- *out++ = *from++;
- } while (--op);
- from = out - dist; /* rest from output */
- }
+ out = chunkcopy_safe(out, from, op, limit);
+ from = window; /* more from start of window */
+ op = wnext;
+ /* This (rare) case can create a situation where
+ the first chunkcopy below must be checked.
+ */
}
}
- else { /* contiguous in window */
- from += wnext - op;
- if (op < len) { /* some from window */
- len -= op;
- do {
- *out++ = *from++;
- } while (--op);
- from = out - dist; /* rest from output */
- }
- }
- while (len > 2) {
- *out++ = *from++;
- *out++ = *from++;
- *out++ = *from++;
- len -= 3;
- }
- if (len) {
- *out++ = *from++;
- if (len > 1)
- *out++ = *from++;
+ if (op < len) { /* still need some from output */
+ out = chunkcopy_safe(out, from, op, limit);
+ len -= op;
+ /* When dist is small the amount of data that can be
+ copied from the window is also small, and progress
+ towards the dangerous end of the output buffer is
+ also small. This means that for trivial memsets and
+ for chunkunroll_relaxed() a safety check is
+ unnecessary. However, these conditions may not be
+ entered at all, and in that case it's possible that
+ the main copy is near the end.
+ */
+ out = chunkunroll_relaxed(out, &dist, &len);
+ out = chunkcopy_safe(out, out - dist, len, limit);
+ } else {
+ /* from points to window, so there is no risk of
+ overlapping pointers requiring memset-like behaviour
+ */
+ out = chunkcopy_safe(out, from, len, limit);
}
}
else {
- from = out - dist; /* copy direct from output */
- do { /* minimum length is three */
- *out++ = *from++;
- *out++ = *from++;
- *out++ = *from++;
- len -= 3;
- } while (len > 2);
- if (len) {
- *out++ = *from++;
- if (len > 1)
- *out++ = *from++;
- }
+ /* Whole reference is in range of current output. No
+ range checks are necessary because we start with room
+ for at least 258 bytes of output, so unroll and roundoff
+ operations can write beyond `out+len` so long as they
+ stay within 258 bytes of `out`.
+ */
+ out = chunkcopy_lapped_relaxed(out, dist, len);
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
diff --git a/contrib/arm/inflate.c b/contrib/arm/inflate.c
index ac333e8c..e40322c3 100644
--- a/contrib/arm/inflate.c
+++ b/contrib/arm/inflate.c
@@ -84,6 +84,7 @@
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
+#include "contrib/arm/chunkcopy.h"
#ifdef MAKEFIXED
# ifndef BUILDFIXED
@@ -405,10 +406,20 @@ unsigned copy;
/* if it hasn't been done already, allocate space for the window */
if (state->window == Z_NULL) {
+ unsigned wsize = 1U << state->wbits;
state->window = (unsigned char FAR *)
- ZALLOC(strm, 1U << state->wbits,
+ ZALLOC(strm, wsize + CHUNKCOPY_CHUNK_SIZE,
sizeof(unsigned char));
if (state->window == Z_NULL) return 1;
+#ifdef INFLATE_CLEAR_UNUSED_UNDEFINED
+ /* Copies from the overflow portion of this buffer are undefined and
+ may cause analysis tools to raise a warning if we don't initialize
+ it. However, this undefined data overwrites other undefined data
+ and is subsequently either overwritten or left deliberately
+ undefined at the end of decode; so there's really no point.
+ */
+ memset(state->window + wsize, 0, CHUNKCOPY_CHUNK_SIZE);
+#endif
}
/* if window not in use yet, initialize */
@@ -1175,17 +1186,16 @@ int flush;
else
from = state->window + (state->wnext - copy);
if (copy > state->length) copy = state->length;
+ if (copy > left) copy = left;
+ put = chunkcopy_safe(put, from, copy, put + left);
}
else { /* copy from output */
- from = put - state->offset;
copy = state->length;
+ if (copy > left) copy = left;
+ put = chunkcopy_lapped_safe(put, state->offset, copy, put + left);
}
- if (copy > left) copy = left;
left -= copy;
state->length -= copy;
- do {
- *put++ = *from++;
- } while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:

View File

@@ -0,0 +1,100 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8e75f66..24d7329 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -95,34 +95,67 @@ set(ZLIB_PUBLIC_HDRS
${CMAKE_CURRENT_BINARY_DIR}/zconf.h
zlib.h
)
-set(ZLIB_PRIVATE_HDRS
- crc32.h
- deflate.h
- gzguts.h
- inffast.h
- inffixed.h
- inflate.h
- inftrees.h
- trees.h
- zutil.h
-)
-set(ZLIB_SRCS
- adler32.c
- compress.c
- crc32.c
- deflate.c
- gzclose.c
- gzlib.c
- gzread.c
- gzwrite.c
- inflate.c
- infback.c
- inftrees.c
- inffast.c
- trees.c
- uncompr.c
- zutil.c
-)
+
+if(ARMv8)
+ set(ZLIB_PRIVATE_HDRS
+ crc32.h
+ deflate.h
+ gzguts.h
+ inffast.h
+ inffixed.h
+ inflate.h
+ inftrees.h
+ trees.h
+ zutil.h
+ contrib/arm/chunkcopy.h
+ )
+ set(ZLIB_SRCS
+ adler32.c
+ compress.c
+ crc32.c
+ deflate.c
+ gzclose.c
+ gzlib.c
+ gzread.c
+ gzwrite.c
+ infback.c
+ inftrees.c
+ contrib/arm/inflate.c
+ contrib/arm/inffast.c
+ trees.c
+ uncompr.c
+ zutil.c
+ )
+ else()
+ set(ZLIB_PRIVATE_HDRS
+ crc32.h
+ deflate.h
+ gzguts.h
+ inffast.h
+ inffixed.h
+ inflate.h
+ inftrees.h
+ trees.h
+ zutil.h
+ )
+ set(ZLIB_SRCS
+ adler32.c
+ compress.c
+ crc32.c
+ deflate.c
+ gzclose.c
+ gzlib.c
+ gzread.c
+ gzwrite.c
+ inflate.c
+ infback.c
+ inftrees.c
+ inffast.c
+ trees.c
+ uncompr.c
+ zutil.c
+ )
+endif()
if(NOT MINGW)
set(ZLIB_DLL_SRCS

View File

@@ -0,0 +1,14 @@
--- a/zlib.pc.cmakein
+++ b/zlib.pc.cmakein
@@ -1,8 +1,8 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=@CMAKE_INSTALL_PREFIX@
-libdir=@INSTALL_LIB_DIR@
-sharedlibdir=@INSTALL_LIB_DIR@
-includedir=@INSTALL_INC_DIR@
+libdir=${exec_prefix}/lib
+sharedlibdir=${exec_prefix}/lib
+includedir=${prefix}/include
Name: zlib
Description: zlib compression library