diff options
author | Heiko Carstens <hca@linux.ibm.com> | 2024-02-03 11:45:24 +0100 |
---|---|---|
committer | Heiko Carstens <hca@linux.ibm.com> | 2024-02-16 14:30:17 +0100 |
commit | c8dde11df19192c421f5b70c2b8ba55d32e07c66 (patch) | |
tree | 6982a612df6d1e6e70e66bd2e3a6a66ceb2cfd1e /arch/s390/include/asm | |
parent | s390/checksum: provide csum_partial_copy_nocheck() (diff) | |
download | linux-c8dde11df19192c421f5b70c2b8ba55d32e07c66.tar.xz linux-c8dde11df19192c421f5b70c2b8ba55d32e07c66.zip |
s390/raid6: convert to use standard fpu_*() inline assemblies
Move the s390 specific raid6 inline assemblies, make them generic, and
reuse them to implement the raid6 gen/xor implementation.
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r-- | arch/s390/include/asm/fpu-insn.h | 48 |
1 file changed, 48 insertions, 0 deletions
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h index 35c4fbe0bdd6..028d28570170 100644 --- a/arch/s390/include/asm/fpu-insn.h +++ b/arch/s390/include/asm/fpu-insn.h @@ -108,6 +108,14 @@ static __always_inline void fpu_stfpc(unsigned int *fpc) : "memory"); } +static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3) +{ + asm volatile("VAB %[v1],%[v2],%[v3]" + : + : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3) + : "memory"); +} + static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3) { asm volatile("VCKSM %[v1],%[v2],%[v3]" @@ -116,6 +124,14 @@ static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3) : "memory"); } +static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3) +{ + asm volatile("VESRAVB %[v1],%[v2],%[v3]" + : + : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3) + : "memory"); +} + #ifdef CONFIG_CC_IS_CLANG static __always_inline void fpu_vl(u8 v1, const void *vxr) @@ -231,6 +247,14 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) #endif /* CONFIG_CC_IS_CLANG */ +static __always_inline void fpu_vlr(u8 v1, u8 v2) +{ + asm volatile("VLR %[v1],%[v2]" + : + : [v1] "I" (v1), [v2] "I" (v2) + : "memory"); +} + static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index) { asm volatile("VLVGF %[v],%[val],%[index]" @@ -239,6 +263,22 @@ static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index) : "memory"); } +static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3) +{ + asm volatile("VN %[v1],%[v2],%[v3]" + : + : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3) + : "memory"); +} + +static __always_inline void fpu_vrepib(u8 v1, s16 i2) +{ + asm volatile("VREPIB %[v1],%[i2]" + : + : [v1] "I" (v1), [i2] "K" (i2) + : "memory"); +} + #ifdef CONFIG_CC_IS_CLANG static __always_inline void fpu_vst(u8 v1, const void *vxr) @@ -335,6 +375,14 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) #endif /* CONFIG_CC_IS_CLANG */ +static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3) +{ + asm 
volatile("VX %[v1],%[v2],%[v3]" + : + : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3) + : "memory"); +} + static __always_inline void fpu_vzero(u8 v) { asm volatile("VZERO %[v]" |