List: glibc-cvs
Subject: GNU C Library master sources branch hjl/memcpy created. glibc-2.20-581-gf63a681
From: hjl@sourceware.org
Date: 2015-01-30 16:58:27
Message-ID: 20150130165827.29312.qmail@sourceware.org
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".
The branch, hjl/memcpy has been created
at f63a6815da4c72626b14b456a6902cc8d3671729 (commit)
- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=f63a6815da4c72626b14b456a6902cc8d3671729
commit f63a6815da4c72626b14b456a6902cc8d3671729
Author: H.J. Lu <hjl.tools@gmail.com>
Date: Fri Jan 30 08:44:30 2015 -0800
Add memcpy-rte-avx.c
Don't inline rte_memcpy.
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index d7002a9..581beb9 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -28,6 +28,11 @@ CFLAGS-strpbrk-c.c += -msse4
CFLAGS-strspn-c.c += -msse4
endif
+ifeq (yes,$(config-cflags-avx))
+sysdep_routines += memcpy-rte-avx
+CFLAGS-memcpy-rte-avx.c += -mavx
+endif
+
ifeq (yes,$(config-cflags-avx2))
sysdep_routines += memset-avx2
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index b64e4f1..59e48dc 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -230,6 +230,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
  /* Support sysdeps/x86_64/multiarch/memcpy.S. */
IFUNC_IMPL (i, name, memcpy,
IFUNC_IMPL_ADD (array, i, memcpy, HAS_AVX,
+ __memcpy_rte_avx)
+ IFUNC_IMPL_ADD (array, i, memcpy, HAS_AVX,
__memcpy_avx_unaligned)
IFUNC_IMPL_ADD (array, i, memcpy, HAS_SSSE3,
__memcpy_ssse3_back)
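For readers unfamiliar with the IFUNC_IMPL_ADD machinery touched above: the change registers __memcpy_rte_avx as one more memcpy candidate that the dynamic loader can pick when AVX is available. The sketch below is not glibc's internal resolver; it only illustrates the general GNU IFUNC dispatch technique, and all names in it (my_memcpy, resolve_my_memcpy, the two variants) are hypothetical.

/* Minimal IFUNC sketch (illustrative only, not the glibc resolver).  */
#include <stddef.h>
#include <string.h>

static void *memcpy_avx_variant (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);   /* stand-in for an AVX implementation */
}

static void *memcpy_fallback (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);   /* stand-in for the SSE paths */
}

/* The resolver runs once at load time and returns the chosen routine.  */
static void *(*resolve_my_memcpy (void)) (void *, const void *, size_t)
{
  __builtin_cpu_init ();
  if (__builtin_cpu_supports ("avx"))
    return memcpy_avx_variant;
  return memcpy_fallback;
}

void *my_memcpy (void *dst, const void *src, size_t n)
  __attribute__ ((ifunc ("resolve_my_memcpy")));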
diff --git a/sysdeps/x86_64/multiarch/memcpy-rte-avx.c b/sysdeps/x86_64/multiarch/memcpy-rte-avx.c
new file mode 100644
index 0000000..3e14556
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memcpy-rte-avx.c
@@ -0,0 +1,5 @@
+#if IS_IN (libc) && defined SHARED
+#define RTE_MACHINE_CPUFLAG_AVX2
+#define rte_memcpy __memcpy_rte_avx
+#include "rte_memcpy.h"
+#endif
diff --git a/sysdeps/x86_64/multiarch/rte_memcpy.h b/sysdeps/x86_64/multiarch/rte_memcpy.h
index 7b2d382..740112a 100644
--- a/sysdeps/x86_64/multiarch/rte_memcpy.h
+++ b/sysdeps/x86_64/multiarch/rte_memcpy.h
@@ -64,8 +64,8 @@ extern "C" {
* @return
* Pointer to the destination data.
*/
-static inline void *
-rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
+void *
+rte_memcpy(void *dst, const void *src, size_t n);
#ifdef RTE_MACHINE_CPUFLAG_AVX2
@@ -192,7 +192,7 @@ rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
}
}
-static inline void *
+void *
rte_memcpy(void *dst, const void *src, size_t n)
{
void *ret = dst;
@@ -488,7 +488,7 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
} \
})
-static inline void *
+void *
rte_memcpy(void *dst, const void *src, size_t n)
{
__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=d2ca99bf141c78bd8d9c1f314ce8a1f12c439d4b
commit d2ca99bf141c78bd8d9c1f314ce8a1f12c439d4b
Author: H.J. Lu <hjl.tools@gmail.com>
Date: Fri Jan 30 08:51:45 2015 -0800
Import rte_memcpy.h
rte_memcpy.h is a memcpy implementation from DPDK:
http://dpdk.org/
optimized for Sandy Bridge and Haswell. See
http://dpdk.org/ml/archives/dev/2014-November/008158.html
The original code is at
https://gist.github.com/lukego/efc82a15bde5ec83cb1b
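A minimal usage sketch, assuming the header is on the include path and the file is compiled with AVX (or at least SSSE3) enabled, e.g. -mavx as in the Makefile change above. rte_memcpy follows the standard memcpy contract: non-overlapping buffers, returns dst. The buffer sizes here are arbitrary and only chosen to hit both the small-copy path and the large aligned-store path.

/* Hypothetical caller of the imported header; not part of the patch.  */
#include <stdint.h>
#include <string.h>
#include "rte_memcpy.h"

int main (void)
{
  uint8_t src[600], dst[600];
  memset (src, 0xab, sizeof src);

  /* Large copy (> 512 bytes) exercises the aligned-store block loop.  */
  rte_memcpy (dst, src, sizeof src);

  /* Small copy (< 16 bytes) exercises the scalar tail path.  */
  rte_memcpy (dst, src, 7);
  return 0;
}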
diff --git a/sysdeps/x86_64/multiarch/rte_memcpy.h b/sysdeps/x86_64/multiarch/rte_memcpy.h
new file mode 100644
index 0000000..7b2d382
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rte_memcpy.h
@@ -0,0 +1,635 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMCPY_X86_64_H_
+#define _RTE_MEMCPY_X86_64_H_
+
+/**
+ * @file
+ *
+ * Functions for SSE/AVX/AVX2 implementation of memcpy().
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <x86intrin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so its address should not be taken
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ * @param n
+ * Number of bytes to copy.
+ * @return
+ * Pointer to the destination data.
+ */
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
+
+#ifdef RTE_MACHINE_CPUFLAG_AVX2
+
+/**
+ * AVX2 implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128((const __m128i *)src);
+ _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ __m256i ymm0;
+
+ ymm0 = _mm256_loadu_si256((const __m256i *)src);
+ _mm256_storeu_si256((__m256i *)dst, ymm0);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+ rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
+ rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+ rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
+ rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+ rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
+ rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
+ rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
+ rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
+}
+
+/**
+ * Copy 64-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ __m256i ymm0, ymm1;
+
+ while (n >= 64) {
+ ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+ n -= 64;
+ ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
+ src = (const uint8_t *)src + 64;
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
+ dst = (uint8_t *)dst + 64;
+ }
+}
+
+/**
+ * Copy 256-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+
+ while (n >= 256) {
+ ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+ n -= 256;
+ ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
+ ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
+ ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
+ ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
+ ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
+ ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
+ ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
+ src = (const uint8_t *)src + 256;
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
+ dst = (uint8_t *)dst + 256;
+ }
+}
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ void *ret = dst;
+ int dstofss;
+ int bits;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dst = *(const uint8_t *)src;
+ src = (const uint8_t *)src + 1;
+ dst = (uint8_t *)dst + 1;
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dst = *(const uint16_t *)src;
+ src = (const uint16_t *)src + 1;
+ dst = (uint16_t *)dst + 1;
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dst = *(const uint32_t *)src;
+ src = (const uint32_t *)src + 1;
+ dst = (uint32_t *)dst + 1;
+ }
+ if (n & 0x08) {
+ *(uint64_t *)dst = *(const uint64_t *)src;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+ if (n <= 512) {
+ if (n >= 256) {
+ n -= 256;
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 256;
+ dst = (uint8_t *)dst + 256;
+ }
+ if (n >= 128) {
+ n -= 128;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 128;
+ dst = (uint8_t *)dst + 128;
+ }
+ if (n >= 64) {
+ n -= 64;
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 64;
+ dst = (uint8_t *)dst + 64;
+ }
+COPY_BLOCK_64_BACK31:
+ if (n > 32) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+ if (n > 0) {
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes
+ */
+ dstofss = 32 - (int)((long long)(void *)dst & 0x1F);
+ n -= dstofss;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + dstofss;
+ dst = (uint8_t *)dst + dstofss;
+
+ /**
+ * Copy 256-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ bits = n;
+ n = n & 255;
+ bits -= n;
+ src = (const uint8_t *)src + bits;
+ dst = (uint8_t *)dst + bits;
+
+ /**
+ * Copy 64-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ if (n >= 64) {
+ rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ bits = n;
+ n = n & 63;
+ bits -= n;
+ src = (const uint8_t *)src + bits;
+ dst = (uint8_t *)dst + bits;
+ }
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_64_BACK31;
+}
+
+#else /* RTE_MACHINE_CPUFLAG_AVX2 */
+
+/**
+ * SSE & AVX implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128((const __m128i *)(const __m128i *)src);
+ _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+ rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+ rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+ rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+ rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+ rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+ rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+ rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+ rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+ rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
+ rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
+ rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
+ rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
+ rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
+ rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
+ rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
+ rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
+}
+
+/**
+ * Macro for copying unaligned block from one location to another with constant load offset,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be immediate value within [1, 15]
+ * - For <src>, make sure <offset> bit backwards & <16 - offset> bit forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
+({ \
+    int tmp; \
+    while (len >= 128 + 16 - offset) { \
+        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+        len -= 128; \
+        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
+        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
+        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
+        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
+        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
+        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
+        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
+        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
+        src = (const uint8_t *)src + 128; \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
+        dst = (uint8_t *)dst + 128; \
+    } \
+    tmp = len; \
+    len = ((len - 16 + offset) & 127) + 16 - offset; \
+    tmp -= len; \
+    src = (const uint8_t *)src + tmp; \
+    dst = (uint8_t *)dst + tmp; \
+    if (len >= 32 + 16 - offset) { \
+        while (len >= 32 + 16 - offset) { \
+            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+            len -= 32; \
+            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
+            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
+            src = (const uint8_t *)src + 32; \
+            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+            dst = (uint8_t *)dst + 32; \
+        } \
+        tmp = len; \
+        len = ((len - 16 + offset) & 31) + 16 - offset; \
+        tmp -= len; \
+        src = (const uint8_t *)src + tmp; \
+        dst = (uint8_t *)dst + tmp; \
+    } \
+})
+
+/**
+ * Macro for copying unaligned block from one location to another,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Use switch here because the aligning instruction requires immediate value for shift count.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be within [1, 15]
+ * - For <src>, make sure <offset> bit backwards & <16 - offset> bit forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
+({ \
+ switch (offset) { \
+ case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x01); break; \
+ case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x02); break; \
+ case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x03); break; \
+ case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x04); break; \
+ case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x05); break; \
+ case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x06); break; \
+ case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x07); break; \
+ case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x08); break; \
+ case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x09); break; \
+ case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0A); break; \
+ case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0B); break; \
+ case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0C); break; \
+ case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0D); break; \
+ case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0E); break; \
+ case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0F); break; \
+ default:; \
+ } \
+})
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+ void *ret = dst;
+ int dstofss;
+ int srcofs;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dst = *(const uint8_t *)src;
+ src = (const uint8_t *)src + 1;
+ dst = (uint8_t *)dst + 1;
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dst = *(const uint16_t *)src;
+ src = (const uint16_t *)src + 1;
+ dst = (uint16_t *)dst + 1;
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dst = *(const uint32_t *)src;
+ src = (const uint32_t *)src + 1;
+ dst = (uint32_t *)dst + 1;
+ }
+ if (n & 0x08) {
+ *(uint64_t *)dst = *(const uint64_t *)src;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 48) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 128) {
+ goto COPY_BLOCK_128_BACK15;
+ }
+ if (n <= 512) {
+ if (n >= 256) {
+ n -= 256;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
+ src = (const uint8_t *)src + 256;
+ dst = (uint8_t *)dst + 256;
+ }
+COPY_BLOCK_255_BACK15:
+ if (n >= 128) {
+ n -= 128;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 128;
+ dst = (uint8_t *)dst + 128;
+ }
+COPY_BLOCK_128_BACK15:
+ if (n >= 64) {
+ n -= 64;
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 64;
+ dst = (uint8_t *)dst + 64;
+ }
+COPY_BLOCK_64_BACK15:
+ if (n >= 32) {
+ n -= 32;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 32;
+ dst = (uint8_t *)dst + 32;
+ }
+ if (n > 16) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n > 0) {
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes,
+ * and make sure the first 15 bytes are copied, because
+ * unaligned copy functions require up to 15 bytes
+ * backwards access.
+ */
+ dstofss = 16 - (int)((long long)(void *)dst & 0x0F) + 16;
+ n -= dstofss;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + dstofss;
+ dst = (uint8_t *)dst + dstofss;
+ srcofs = (int)((long long)(const void *)src & 0x0F);
+
+ /**
+ * For aligned copy
+ */
+ if (srcofs == 0) {
+ /**
+ * Copy 256-byte blocks
+ */
+ for (; n >= 256; n -= 256) {
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ dst = (uint8_t *)dst + 256;
+ src = (const uint8_t *)src + 256;
+ }
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_255_BACK15;
+ }
+
+ /**
+ * For copy with unaligned load
+ */
+ MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_64_BACK15;
+}
+
+#endif /* RTE_MACHINE_CPUFLAG_AVX2 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_X86_64_H_ */
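One idea worth calling out from the header above is the overlapping head/tail trick used by the small-size branches (for example the n <= 32 case): copy a full 16-byte chunk from the start and another from the end, letting the two stores overlap in the middle instead of finishing the remainder byte by byte. Below is a standalone sketch of that technique only; the helper names are illustrative and not part of the patch, and it needs nothing beyond SSE2 (enabled by default on x86-64).

/* Illustrative sketch of the overlapping 16-byte head/tail copy.  */
#include <stddef.h>
#include <stdint.h>
#include <x86intrin.h>

static inline void copy16 (uint8_t *dst, const uint8_t *src)
{
  _mm_storeu_si128 ((__m128i *) dst,
                    _mm_loadu_si128 ((const __m128i *) src));
}

/* Copy 17..32 bytes with exactly two 16-byte moves; the stores overlap
   for n < 32, which is harmless when src and dst do not overlap.  */
static inline void copy_17_to_32 (uint8_t *dst, const uint8_t *src, size_t n)
{
  copy16 (dst, src);                    /* bytes [0, 16)   */
  copy16 (dst + n - 16, src + n - 16);  /* bytes [n-16, n) */
}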
-----------------------------------------------------------------------
hooks/post-receive
--
GNU C Library master sources