sparc: move the XOR code to lib/raid/

Move the optimized XOR into lib/raid and include it in xor.ko instead
of always building it into the main kernel image.

The code should probably be split into separate files for the two
implementations, but for now this just does the trivial move.

Link: https://lkml.kernel.org/r/20260327061704.3707577-18-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Sterba <dsterba@suse.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Li Nan <linan122@huawei.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Song Liu <song@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Christoph Hellwig 2026-03-27 07:16:49 +01:00 committed by Andrew Morton
parent 5265d55b21
commit 7f96362396
7 changed files with 52 additions and 57 deletions

View File

@ -14,7 +14,6 @@
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/trap_block.h>
#include <asm/xor.h>
void *__memscan_zero(void *, size_t);
void *__memscan_generic(void *, int, size_t);

View File

@ -1,9 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
#ifndef ___ASM_SPARC_XOR_H
#define ___ASM_SPARC_XOR_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/xor_64.h>
#else
#include <asm/xor_32.h>
#endif
#endif
#include <asm/spitfire.h>
extern struct xor_block_template xor_block_VIS;
extern struct xor_block_template xor_block_niagara;
#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
/* Force VIS for everything except Niagara. */
if (tlb_type == hypervisor &&
(sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5))
xor_force(&xor_block_niagara);
else
xor_force(&xor_block_VIS);
}
#else /* sparc64 */
/* For grins, also test the generic routines. */
#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_SPARC;
#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
xor_register(&xor_block_32regs);
xor_register(&xor_block_SPARC);
}
#endif /* !sparc64 */
#endif /* ___ASM_SPARC_XOR_H */

View File

@ -48,7 +48,7 @@ lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o

View File

@ -18,6 +18,8 @@ xor-$(CONFIG_CPU_HAS_LSX) += loongarch/xor_simd.o
xor-$(CONFIG_CPU_HAS_LSX) += loongarch/xor_simd_glue.o
xor-$(CONFIG_ALTIVEC) += powerpc/xor_vmx.o powerpc/xor_vmx_glue.o
xor-$(CONFIG_RISCV_ISA_V) += riscv/xor.o riscv/xor-glue.o
xor-$(CONFIG_SPARC32) += sparc/xor-sparc32.o
xor-$(CONFIG_SPARC64) += sparc/xor-sparc64.o sparc/xor-sparc64-glue.o
CFLAGS_arm/xor-neon.o += $(CC_FLAGS_FPU)

View File

@ -1,16 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/asm/xor.h
*
* Optimized RAID-5 checksumming functions for 32-bit Sparc.
*/
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* High speed xor_block operation for RAID4/5 utilizing the
* ldd/std SPARC instructions.
*
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/raid/xor_impl.h>
#include <asm/xor.h>
static void
sparc_2(unsigned long bytes, unsigned long * __restrict p1,
@ -248,21 +244,10 @@ sparc_5(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
static struct xor_block_template xor_block_SPARC = {
struct xor_block_template xor_block_SPARC = {
.name = "SPARC",
.do_2 = sparc_2,
.do_3 = sparc_3,
.do_4 = sparc_4,
.do_5 = sparc_5,
};
/* For grins, also test the generic routines. */
#include <asm-generic/xor.h>
#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
xor_register(&xor_block_32regs);
xor_register(&xor_block_SPARC);
}

View File

@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* include/asm/xor.h
*
* High speed xor_block operation for RAID4/5 utilizing the
* UltraSparc Visual Instruction Set and Niagara block-init
* twin-load instructions.
@ -10,7 +8,8 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
#include <asm/spitfire.h>
#include <linux/raid/xor_impl.h>
#include <asm/xor.h>
void xor_vis_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2);
@ -29,7 +28,7 @@ void xor_vis_5(unsigned long bytes, unsigned long * __restrict p1,
/* XXX Ugh, write cheetah versions... -DaveM */
static struct xor_block_template xor_block_VIS = {
struct xor_block_template xor_block_VIS = {
.name = "VIS",
.do_2 = xor_vis_2,
.do_3 = xor_vis_3,
@ -52,25 +51,10 @@ void xor_niagara_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p4,
const unsigned long * __restrict p5);
static struct xor_block_template xor_block_niagara = {
struct xor_block_template xor_block_niagara = {
.name = "Niagara",
.do_2 = xor_niagara_2,
.do_3 = xor_niagara_3,
.do_4 = xor_niagara_4,
.do_5 = xor_niagara_5,
};
#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
/* Force VIS for everything except Niagara. */
if (tlb_type == hypervisor &&
(sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5))
xor_force(&xor_block_niagara);
else
xor_force(&xor_block_VIS);
}

View File

@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/sparc64/lib/xor.S
*
* High speed xor_block operation for RAID4/5 utilizing the
* UltraSparc Visual Instruction Set and Niagara store-init/twin-load.
*
@ -92,7 +90,6 @@ ENTRY(xor_vis_2)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_2)
EXPORT_SYMBOL(xor_vis_2)
ENTRY(xor_vis_3)
rd %fprs, %o5
@ -159,7 +156,6 @@ ENTRY(xor_vis_3)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_3)
EXPORT_SYMBOL(xor_vis_3)
ENTRY(xor_vis_4)
rd %fprs, %o5
@ -245,7 +241,6 @@ ENTRY(xor_vis_4)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_4)
EXPORT_SYMBOL(xor_vis_4)
ENTRY(xor_vis_5)
save %sp, -192, %sp
@ -352,7 +347,6 @@ ENTRY(xor_vis_5)
ret
restore
ENDPROC(xor_vis_5)
EXPORT_SYMBOL(xor_vis_5)
/* Niagara versions. */
ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
@ -399,7 +393,6 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
ret
restore
ENDPROC(xor_niagara_2)
EXPORT_SYMBOL(xor_niagara_2)
ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
@ -461,7 +454,6 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
ret
restore
ENDPROC(xor_niagara_3)
EXPORT_SYMBOL(xor_niagara_3)
ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
save %sp, -192, %sp
@ -544,7 +536,6 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ret
restore
ENDPROC(xor_niagara_4)
EXPORT_SYMBOL(xor_niagara_4)
ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
save %sp, -192, %sp
@ -643,4 +634,3 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s
ret
restore
ENDPROC(xor_niagara_5)
EXPORT_SYMBOL(xor_niagara_5)