Commit 474010a91d35fef5ca7dea77205b6a5c7e68c3e9

Authored by Janne Grunau
1 parent bec15359
Exists in master and in 2 other branches v2, v3

arm: NEON optimisations for gf_w16

Optimisations for the 4,16 split table region multiplications.

Selected time_tool.sh 16 -A -B results for a 1.7 GHz cortex-a9:
Region Best (MB/s):   532.14   W-Method: 16 -m SPLIT 16 4 -r SIMD -
Region Best (MB/s):   212.34   W-Method: 16 -m SPLIT 16 4 -r NOSIMD -
Region Best (MB/s):   801.36   W-Method: 16 -m SPLIT 16 4 -r SIMD -r ALTMAP -
Region Best (MB/s):    93.20   W-Method: 16 -m SPLIT 16 4 -r NOSIMD -r ALTMAP -
Region Best (MB/s):   273.99   W-Method: 16 -m SPLIT 16 8 -
Region Best (MB/s):   270.81   W-Method: 16 -m SPLIT 8 8 -
Region Best (MB/s):    70.42   W-Method: 16 -m COMPOSITE 2 - -
Region Best (MB/s):   393.54   W-Method: 16 -m COMPOSITE 2 - -r ALTMAP -
include/gf_w16.h 0 → 100644
... ... @@ -0,0 +1,66 @@
  1 +/*
  2 + * GF-Complete: A Comprehensive Open Source Library for Galois Field Arithmetic
  3 + * James S. Plank, Ethan L. Miller, Kevin M. Greenan,
  4 + * Benjamin A. Arnold, John A. Burnum, Adam W. Disney, Allen C. McBride.
  5 + *
  6 + * gf_w16.h
  7 + *
  8 + * Defines and data structures for 16-bit Galois fields
  9 + */
  10 +
  11 +#ifndef GF_COMPLETE_GF_W16_H
  12 +#define GF_COMPLETE_GF_W16_H
  13 +
  14 +#include <stdint.h>
  15 +
/* Parameters of the 16-bit field. */
#define GF_FIELD_WIDTH (16)
#define GF_FIELD_SIZE (1 << GF_FIELD_WIDTH)
/* Parenthesized so the macro is safe inside larger expressions:
 * unparenthesized, "2*GF_MULT_GROUP_SIZE" would expand to
 * "2*65536-1" (131071) instead of 131070. */
#define GF_MULT_GROUP_SIZE (GF_FIELD_SIZE-1)

/* Parameters of the 8-bit base field used by the composite/split code. */
#define GF_BASE_FIELD_WIDTH (8)
#define GF_BASE_FIELD_SIZE (1 << GF_BASE_FIELD_WIDTH)
  22 +
/* Log/antilog tables for w=16 multiplication.  antilog_tbl is doubled in
 * size, presumably so log(a)+log(b) can index it without a modular
 * reduction (d_antilog points into it) -- TODO confirm against the init
 * code in gf_w16.c. */
struct gf_w16_logtable_data {
  uint16_t log_tbl[GF_FIELD_SIZE];      /* log of each field element */
  uint16_t antilog_tbl[GF_FIELD_SIZE * 2];
  uint16_t inv_tbl[GF_FIELD_SIZE];      /* multiplicative inverses */
  uint16_t *d_antilog;                  /* offset pointer into antilog_tbl */
};
  29 +
/* Log tables for the zero-handling log variant: log_tbl is int so an
 * out-of-range sentinel can stand in for log(0), and antilog_tbl points
 * at an offset into the oversized _antilog_tbl -- presumably so sentinel
 * logs index harmlessly; verify against the init code in gf_w16.c. */
struct gf_w16_zero_logtable_data {
  int log_tbl[GF_FIELD_SIZE];
  uint16_t _antilog_tbl[GF_FIELD_SIZE * 4];
  uint16_t *antilog_tbl;                /* offset pointer into _antilog_tbl */
  uint16_t inv_tbl[GF_FIELD_SIZE];
};
  36 +
/* Same layout as gf_w16_logtable_data plus a 64K scratch table --
 * presumably filled lazily per region multiplication (name suggests;
 * confirm in gf_w16.c). */
struct gf_w16_lazytable_data {
  uint16_t log_tbl[GF_FIELD_SIZE];
  uint16_t antilog_tbl[GF_FIELD_SIZE * 2];
  uint16_t inv_tbl[GF_FIELD_SIZE];
  uint16_t *d_antilog;
  uint16_t lazytable[GF_FIELD_SIZE];
};
  44 +
/* Constants for the BYTWO implementations: the primitive polynomial and
 * two masks replicated across a 64-bit word (consumed by the AB2 macro
 * in gf_w16.c). */
struct gf_w16_bytwo_data {
  uint64_t prim_poly;
  uint64_t mask1;
  uint64_t mask2;
};
  50 +
/* SPLIT 8,8: three 256x256 product tables -- presumably one per 8-bit
 * shift of the sub-products (see the exp loop in gf_w16_split_init);
 * ~384 KiB total. */
struct gf_w16_split_8_8_data {
  uint16_t tables[3][256][256];
};
  54 +
/* GROUP 4,4: 16-entry reduction and shift tables for 4-bit group
 * multiplication. */
struct gf_w16_group_4_4_data {
  uint16_t reduce[16];
  uint16_t shift[16];
};
  59 +
/* COMPOSITE over GF(2^8): pointer to the base field's multiplication
 * table -- ownership presumably lies with the base gf; confirm. */
struct gf_w16_composite_data {
  uint8_t *mult_table;
};
  63 +
/* Installs the NEON SPLIT 4,16 region-multiply handlers on gf.  Defined
 * in src/neon/gf_w16_neon.c; only called from gf_w16_split_init() when
 * ARM_NEON is compiled in. */
void gf_w16_neon_split_init(gf_t *gf);
  65 +
  66 +#endif /* GF_COMPLETE_GF_W16_H */
... ...
src/Makefile.am
... ... @@ -12,7 +12,8 @@ libgf_complete_la_SOURCES = gf.c gf_method.c gf_wgen.c gf_w4.c gf_w8.c gf_w16.c
12 12  
13 13 if HAVE_NEON
14 14 libgf_complete_la_SOURCES += neon/gf_w4_neon.c \
15   - neon/gf_w8_neon.c
  15 + neon/gf_w8_neon.c \
  16 + neon/gf_w16_neon.c
16 17 endif
17 18  
18 19 libgf_complete_la_LDFLAGS = -version-info 1:0:0
... ...
src/gf_w16.c
... ... @@ -11,54 +11,7 @@
11 11 #include "gf_int.h"
12 12 #include <stdio.h>
13 13 #include <stdlib.h>
14   -
15   -#define GF_FIELD_WIDTH (16)
16   -#define GF_FIELD_SIZE (1 << GF_FIELD_WIDTH)
17   -#define GF_MULT_GROUP_SIZE GF_FIELD_SIZE-1
18   -
19   -#define GF_BASE_FIELD_WIDTH (8)
20   -#define GF_BASE_FIELD_SIZE (1 << GF_BASE_FIELD_WIDTH)
21   -
22   -struct gf_w16_logtable_data {
23   - uint16_t log_tbl[GF_FIELD_SIZE];
24   - uint16_t antilog_tbl[GF_FIELD_SIZE * 2];
25   - uint16_t inv_tbl[GF_FIELD_SIZE];
26   - uint16_t *d_antilog;
27   -};
28   -
29   -struct gf_w16_zero_logtable_data {
30   - int log_tbl[GF_FIELD_SIZE];
31   - uint16_t _antilog_tbl[GF_FIELD_SIZE * 4];
32   - uint16_t *antilog_tbl;
33   - uint16_t inv_tbl[GF_FIELD_SIZE];
34   -};
35   -
36   -struct gf_w16_lazytable_data {
37   - uint16_t log_tbl[GF_FIELD_SIZE];
38   - uint16_t antilog_tbl[GF_FIELD_SIZE * 2];
39   - uint16_t inv_tbl[GF_FIELD_SIZE];
40   - uint16_t *d_antilog;
41   - uint16_t lazytable[GF_FIELD_SIZE];
42   -};
43   -
44   -struct gf_w16_bytwo_data {
45   - uint64_t prim_poly;
46   - uint64_t mask1;
47   - uint64_t mask2;
48   -};
49   -
50   -struct gf_w16_split_8_8_data {
51   - uint16_t tables[3][256][256];
52   -};
53   -
54   -struct gf_w16_group_4_4_data {
55   - uint16_t reduce[16];
56   - uint16_t shift[16];
57   -};
58   -
59   -struct gf_w16_composite_data {
60   - uint8_t *mult_table;
61   -};
  14 +#include "gf_w16.h"
62 15  
63 16 #define AB2(ip, am1 ,am2, b, t1, t2) {\
64 17 t1 = (b << 1) & am1;\
... ... @@ -1264,6 +1217,7 @@ int gf_w16_split_init(gf_t *gf)
1264 1217 gf_internal_t *h;
1265 1218 struct gf_w16_split_8_8_data *d8;
1266 1219 int i, j, exp, issse3;
  1220 + int isneon = 0;
1267 1221 uint32_t p, basep;
1268 1222  
1269 1223 h = (gf_internal_t *) gf->scratch;
... ... @@ -1273,6 +1227,9 @@ int gf_w16_split_init(gf_t *gf)
1273 1227 #else
1274 1228 issse3 = 0;
1275 1229 #endif
  1230 +#ifdef ARM_NEON
  1231 + isneon = 1;
  1232 +#endif
1276 1233  
1277 1234 if (h->arg1 == 8 && h->arg2 == 8) {
1278 1235 d8 = (struct gf_w16_split_8_8_data *) h->private;
... ... @@ -1317,6 +1274,10 @@ int gf_w16_split_init(gf_t *gf)
1317 1274  
1318 1275 if (issse3) {
1319 1276 gf->multiply_region.w32 = gf_w16_split_4_16_lazy_sse_multiply_region;
  1277 + } else if (isneon) {
  1278 +#ifdef ARM_NEON
  1279 + gf_w16_neon_split_init(gf);
  1280 +#endif
1320 1281 } else {
1321 1282 gf->multiply_region.w32 = gf_w16_split_8_16_lazy_multiply_region;
1322 1283 }
... ... @@ -1326,12 +1287,12 @@ int gf_w16_split_init(gf_t *gf)
1326 1287 gf->multiply_region.w32 = gf_w16_split_8_16_lazy_multiply_region;
1327 1288  
1328 1289 } else if ((h->arg1 == 4 && h->arg2 == 16) || (h->arg2 == 4 && h->arg1 == 16)) {
1329   - if (issse3) {
  1290 + if (issse3 || isneon) {
1330 1291 if(h->region_type & GF_REGION_ALTMAP && h->region_type & GF_REGION_NOSIMD)
1331 1292 gf->multiply_region.w32 = gf_w16_split_4_16_lazy_nosse_altmap_multiply_region;
1332 1293 else if(h->region_type & GF_REGION_NOSIMD)
1333 1294 gf->multiply_region.w32 = gf_w16_split_4_16_lazy_multiply_region;
1334   - else if(h->region_type & GF_REGION_ALTMAP)
  1295 + else if(h->region_type & GF_REGION_ALTMAP && issse3)
1335 1296 gf->multiply_region.w32 = gf_w16_split_4_16_lazy_sse_altmap_multiply_region;
1336 1297 } else {
1337 1298 if(h->region_type & GF_REGION_SIMD)
... ...
src/neon/gf_w16_neon.c 0 → 100644
... ... @@ -0,0 +1,356 @@
  1 +/*
  2 + * GF-Complete: A Comprehensive Open Source Library for Galois Field Arithmetic
  3 + * James S. Plank, Ethan L. Miller, Kevin M. Greenan,
  4 + * Benjamin A. Arnold, John A. Burnum, Adam W. Disney, Allen C. McBride.
  5 + *
  6 + * Copyright (c) 2014: Janne Grunau <j@jannau.net>
  7 + *
  8 + * Redistribution and use in source and binary forms, with or without
  9 + * modification, are permitted provided that the following conditions
  10 + * are met:
  11 + *
  12 + * - Redistributions of source code must retain the above copyright
  13 + * notice, this list of conditions and the following disclaimer.
  14 + *
  15 + * - Redistributions in binary form must reproduce the above copyright
  16 + * notice, this list of conditions and the following disclaimer in
  17 + * the documentation and/or other materials provided with the
  18 + * distribution.
  19 + *
  20 + * - Neither the name of the University of Tennessee nor the names of its
  21 + * contributors may be used to endorse or promote products derived
  22 + * from this software without specific prior written permission.
  23 + *
  24 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  25 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  26 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  27 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  28 + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  29 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  30 + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  31 + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  32 + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  33 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
  34 + * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  35 + * POSSIBILITY OF SUCH DAMAGE.
  36 + *
  37 + *
  38 + * gf_w16_neon.c
  39 + *
  40 + * Neon routines for 16-bit Galois fields
  41 + *
  42 + */
  43 +
  44 +#include "gf_int.h"
  45 +#include <stdio.h>
  46 +#include <stdlib.h>
  47 +#include "gf_w16.h"
  48 +
  49 +#ifdef ARCH_AARCH64
/* AArch64 kernel: multiply a region of 16-bit words by val using eight
 * 16-entry nibble lookup tables.  tbl holds the low product bytes for
 * nibble positions 0..3 (4*16 bytes), followed by the high product bytes
 * (pointer "high"); see the table build in
 * neon_w16_split_4_16_lazy_multiply_region.  Processes 16 words (32
 * bytes) per iteration -- the caller aligns the region to 32 bytes.
 * If xor is nonzero, products are XORed into dst instead of stored. */
static
inline
void
neon_w16_split_4_multiply_region(gf_t *gf, uint16_t *src, uint16_t *dst,
                                 uint16_t *d_end, uint8_t *tbl,
                                 gf_val_32_t val, int xor)
{
  unsigned i;
  uint8_t *high = tbl + 4 * 16;
  uint16x8_t va0, va1, r0, r1;
  uint8x16_t loset, rl, rh;
  uint8x16x2_t va;

  /* Preload all eight lookup tables into registers. */
  uint8x16_t tbl_h[4], tbl_l[4];
  for (i = 0; i < 4; i++) {
    tbl_l[i] = vld1q_u8(tbl + i*16);
    tbl_h[i] = vld1q_u8(high + i*16);
  }

  loset = vdupq_n_u8(0xf);  /* low-nibble mask */

  while (dst < d_end) {
    va0 = vld1q_u16(src);
    va1 = vld1q_u16(src + 8);

    /* Byte transpose: va.val[0] holds the even (low) bytes of the 16
     * words, va.val[1] the odd (high) bytes -- assumes little-endian
     * uint16 layout; TODO confirm for big-endian targets. */
    va = vtrnq_u8(vreinterpretq_u8_u16(va0), vreinterpretq_u8_u16(va1));

    /* Tables 0/2: low nibbles of the low/high source byte. */
    rl = vqtbl1q_u8(tbl_l[0], vandq_u8(va.val[0], loset));
    rh = vqtbl1q_u8(tbl_h[0], vandq_u8(va.val[0], loset));
    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[2], vandq_u8(va.val[1], loset)));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[2], vandq_u8(va.val[1], loset)));

    va.val[0] = vshrq_n_u8(va.val[0], 4);
    va.val[1] = vshrq_n_u8(va.val[1], 4);

    /* Tables 1/3: high nibbles of the low/high source byte. */
    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[1], va.val[0]));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[1], va.val[0]));
    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[3], va.val[1]));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[3], va.val[1]));

    /* Undo the transpose: re-interleave product bytes into words. */
    va = vtrnq_u8(rl, rh);
    r0 = vreinterpretq_u16_u8(va.val[0]);
    r1 = vreinterpretq_u16_u8(va.val[1]);

    if (xor) {
      va0 = vld1q_u16(dst);
      va1 = vld1q_u16(dst + 8);
      r0 = veorq_u16(r0, va0);
      r1 = veorq_u16(r1, va1);
    }
    vst1q_u16(dst, r0);
    vst1q_u16(dst + 8, r1);

    src += 16;
    dst += 16;
  }
}
  107 +
/* AArch64 ALTMAP kernel: same tables as the standard kernel, but the
 * region uses the alternate mapping -- each 32-byte chunk stores the 16
 * high product/source bytes first, then the 16 low bytes (the indexing
 * below fixes this: tables 0/1 take nibbles of vl = src+16, tables 2/3
 * nibbles of vh = src), so no interleave/deinterleave is needed. */
static
inline
void
neon_w16_split_4_altmap_multiply_region(gf_t *gf, uint8_t *src,
                                        uint8_t *dst, uint8_t *d_end,
                                        uint8_t *tbl, gf_val_32_t val,
                                        int xor)
{
  unsigned i;
  uint8_t *high = tbl + 4 * 16;
  uint8x16_t vh, vl, rh, rl;
  uint8x16_t loset;

  /* Preload all eight lookup tables into registers. */
  uint8x16_t tbl_h[4], tbl_l[4];
  for (i = 0; i < 4; i++) {
    tbl_l[i] = vld1q_u8(tbl + i*16);
    tbl_h[i] = vld1q_u8(high + i*16);
  }

  loset = vdupq_n_u8(0xf);  /* low-nibble mask */

  while (dst < d_end) {
    vh = vld1q_u8(src);        /* high-byte plane */
    vl = vld1q_u8(src + 16);   /* low-byte plane */

    /* Each product byte XORs all four nibble tables. */
    rl = vqtbl1q_u8(tbl_l[0], vandq_u8(vl, loset));
    rh = vqtbl1q_u8(tbl_h[0], vandq_u8(vl, loset));
    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[2], vandq_u8(vh, loset)));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[2], vandq_u8(vh, loset)));

    vl = vshrq_n_u8(vl, 4);
    vh = vshrq_n_u8(vh, 4);

    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[1], vl));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[1], vl));
    rl = veorq_u8(rl, vqtbl1q_u8(tbl_l[3], vh));
    rh = veorq_u8(rh, vqtbl1q_u8(tbl_h[3], vh));

    if (xor) {
      vh = vld1q_u8(dst);
      vl = vld1q_u8(dst + 16);
      rh = veorq_u8(rh, vh);
      rl = veorq_u8(rl, vl);
    }
    vst1q_u8(dst, rh);         /* high plane first, matching input layout */
    vst1q_u8(dst + 16, rl);

    src += 32;
    dst += 32;
  }
}
  159 +
  160 +#else /* ARCH_AARCH64 */
  161 +
/* ARMv7 (64-bit NEON) kernel: same algorithm as the AArch64 version but
 * with uint8x8x2_t tables for vtbl2 and vmovn/vshrn to split each word
 * into low/high bytes.  Processes 8 words (16 bytes) per iteration;
 * the caller aligns the region to 32 bytes, a multiple of that. */
static
inline
void
neon_w16_split_4_multiply_region(gf_t *gf, uint16_t *src, uint16_t *dst,
                                 uint16_t *d_end, uint8_t *tbl,
                                 gf_val_32_t val, int xor)
{
  unsigned i;
  uint8_t *high = tbl + 4 * 16;
  uint16x8_t va, r;
  uint8x8_t loset, vb, vc, rl, rh;

  /* Preload the eight 16-entry tables as vtbl2 register pairs. */
  uint8x8x2_t tbl_h[4], tbl_l[4];
  for (i = 0; i < 4; i++) {
    tbl_l[i].val[0] = vld1_u8(tbl + i*16);
    tbl_l[i].val[1] = vld1_u8(tbl + i*16 + 8);
    tbl_h[i].val[0] = vld1_u8(high + i*16);
    tbl_h[i].val[1] = vld1_u8(high + i*16 + 8);
  }

  loset = vdup_n_u8(0xf);  /* low-nibble mask */

  while (dst < d_end) {
    va = vld1q_u16(src);

    vb = vmovn_u16(va);      /* low bytes of the 8 words */
    vc = vshrn_n_u16(va, 8); /* high bytes */

    /* Accumulate all four nibble tables into each product byte. */
    rl = vtbl2_u8(tbl_l[0], vand_u8(vb, loset));
    rh = vtbl2_u8(tbl_h[0], vand_u8(vb, loset));
    vb = vshr_n_u8(vb, 4);
    rl = veor_u8(rl, vtbl2_u8(tbl_l[2], vand_u8(vc, loset)));
    rh = veor_u8(rh, vtbl2_u8(tbl_h[2], vand_u8(vc, loset)));
    vc = vshr_n_u8(vc, 4);
    rl = veor_u8(rl, vtbl2_u8(tbl_l[1], vb));
    rh = veor_u8(rh, vtbl2_u8(tbl_h[1], vb));
    rl = veor_u8(rl, vtbl2_u8(tbl_l[3], vc));
    rh = veor_u8(rh, vtbl2_u8(tbl_h[3], vc));

    /* Recombine: widen low bytes and OR in the high bytes shifted up. */
    r = vmovl_u8(rl);
    r = vorrq_u16(r, vshll_n_u8(rh, 8));

    if (xor) {
      va = vld1q_u16(dst);
      r = veorq_u16(r, va);
    }
    vst1q_u16(dst, r);

    src += 8;
    dst += 8;
  }
}
  214 +
  215 +static
  216 +inline
  217 +void
  218 +neon_w16_split_4_altmap_multiply_region(gf_t *gf, uint8_t *src,
  219 + uint8_t *dst, uint8_t *d_end,
  220 + uint8_t *tbl, gf_val_32_t val,
  221 + int xor)
  222 +{
  223 + unsigned i;
  224 + uint8_t *high = tbl + 4 * 16;
  225 + uint8x8_t vh0, vh1, vl0, vl1, r0, r1, r2, r3;
  226 + uint8x8_t loset;
  227 +
  228 + uint8x8x2_t tbl_h[4], tbl_l[4];
  229 + for (i = 0; i < 4; i++) {
  230 + tbl_l[i].val[0] = vld1_u8(tbl + i*16);
  231 + tbl_l[i].val[1] = vld1_u8(tbl + i*16 + 8);
  232 + tbl_h[i].val[0] = vld1_u8(high + i*16);
  233 + tbl_h[i].val[1] = vld1_u8(high + i*16 + 8);
  234 + }
  235 +
  236 + loset = vdup_n_u8(0xf);
  237 +
  238 + while (dst < d_end) {
  239 + vh0 = vld1_u8(src);
  240 + vh1 = vld1_u8(src + 8);
  241 + vl0 = vld1_u8(src + 16);
  242 + vl1 = vld1_u8(src + 24);
  243 +
  244 + r0 = vtbl2_u8(tbl_l[0], vand_u8(vh0, loset));
  245 + r1 = vtbl2_u8(tbl_h[0], vand_u8(vh1, loset));
  246 + r2 = vtbl2_u8(tbl_l[2], vand_u8(vl0, loset));
  247 + r3 = vtbl2_u8(tbl_h[2], vand_u8(vl1, loset));
  248 +
  249 + vh0 = vshr_n_u8(vh0, 4);
  250 + vh1 = vshr_n_u8(vh1, 4);
  251 + vl0 = vshr_n_u8(vl0, 4);
  252 + vl1 = vshr_n_u8(vl1, 4);
  253 +
  254 + r0 = veor_u8(r0, vtbl2_u8(tbl_l[1], vh0));
  255 + r1 = veor_u8(r1, vtbl2_u8(tbl_h[1], vh1));
  256 + r2 = veor_u8(r2, vtbl2_u8(tbl_l[3], vl0));
  257 + r3 = veor_u8(r3, vtbl2_u8(tbl_h[3], vl1));
  258 +
  259 + if (xor) {
  260 + vh0 = vld1_u8(dst);
  261 + vh1 = vld1_u8(dst + 8);
  262 + vl0 = vld1_u8(dst + 16);
  263 + vl1 = vld1_u8(dst + 24);
  264 + r0 = veor_u8(r0, vh0);
  265 + r1 = veor_u8(r1, vh1);
  266 + r2 = veor_u8(r2, vl0);
  267 + r3 = veor_u8(r3, vl1);
  268 + }
  269 + vst1_u8(dst, r0);
  270 + vst1_u8(dst + 8, r1);
  271 + vst1_u8(dst + 16, r2);
  272 + vst1_u8(dst + 24, r3);
  273 +
  274 + src += 32;
  275 + dst += 32;
  276 + }
  277 +}
  278 +#endif /* ARCH_AARCH64 */
  279 +
  280 +static
  281 +inline
  282 +void
  283 +neon_w16_split_4_16_lazy_multiply_region(gf_t *gf, void *src, void *dest,
  284 + gf_val_32_t val, int bytes, int xor,
  285 + int altmap)
  286 +{
  287 + gf_region_data rd;
  288 + unsigned i, j;
  289 + uint64_t c, prod;
  290 + uint8_t tbl[2 * 4 * 16];
  291 + uint8_t *high = tbl + 4 * 16;
  292 +
  293 + if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  294 + if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }
  295 +
  296 + for (i = 0; i < 4; i++) {
  297 + for (j = 0; j < 16; j++) {
  298 + c = (j << (i*4));
  299 + prod = gf->multiply.w32(gf, c, val);
  300 + tbl[i*16 + j] = prod & 0xff;
  301 + high[i*16 + j] = prod >> 8;
  302 + }
  303 + }
  304 +
  305 + gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 32);
  306 + gf_do_initial_region_alignment(&rd);
  307 +
  308 + if (altmap) {
  309 + uint8_t *s8 = rd.s_start;
  310 + uint8_t *d8 = rd.d_start;
  311 + uint8_t *end8 = rd.d_top;
  312 + if (xor)
  313 + neon_w16_split_4_altmap_multiply_region(gf, s8, d8, end8, tbl, val, 1);
  314 + else
  315 + neon_w16_split_4_altmap_multiply_region(gf, s8, d8, end8, tbl, val, 0);
  316 + } else {
  317 + uint16_t *s16 = rd.s_start;
  318 + uint16_t *d16 = rd.d_start;
  319 + uint16_t *end16 = rd.d_top;
  320 + if (xor)
  321 + neon_w16_split_4_multiply_region(gf, s16, d16, end16, tbl, val, 1);
  322 + else
  323 + neon_w16_split_4_multiply_region(gf, s16, d16, end16, tbl, val, 0);
  324 + }
  325 +
  326 + gf_do_final_region_alignment(&rd);
  327 +}
  328 +
/* multiply_region.w32 entry point for the standard (non-ALTMAP) data
 * layout. */
static
void
gf_w16_split_4_16_lazy_multiply_region_neon(gf_t *gf, void *src, void *dest,
                                            gf_val_32_t val, int bytes, int xor)
{
  neon_w16_split_4_16_lazy_multiply_region(gf, src, dest, val, bytes, xor, 0);
}
  336 +
/* multiply_region.w32 entry point for the ALTMAP data layout. */
static
void
gf_w16_split_4_16_lazy_altmap_multiply_region_neon(gf_t *gf, void *src,
                                                   void *dest,
                                                   gf_val_32_t val, int bytes,
                                                   int xor)
{
  neon_w16_split_4_16_lazy_multiply_region(gf, src, dest, val, bytes, xor, 1);
}
  346 +
  347 +
  348 +void gf_w16_neon_split_init(gf_t *gf)
  349 +{
  350 + gf_internal_t *h = (gf_internal_t *) gf->scratch;
  351 +
  352 + if (h->region_type & GF_REGION_ALTMAP)
  353 + gf->multiply_region.w32 = gf_w16_split_4_16_lazy_altmap_multiply_region_neon;
  354 + else
  355 + gf->multiply_region.w32 = gf_w16_split_4_16_lazy_multiply_region_neon;
  356 +}
... ...